Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kunit test for clk rate management
4 */
5#include <linux/clk.h>
6#include <linux/clk-provider.h>
7
8/* Needed for clk_hw_get_clk() */
9#include "clk.h"
10
11#include <kunit/test.h>
12
/* Ops table for clocks that need no callbacks at all. */
static const struct clk_ops empty_clk_ops = { };

/* Frequencies (in Hz) used by the dummy clocks throughout these tests. */
#define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)
18
/*
 * State for a basic dummy clock: the clk_hw handle plus the rate the
 * "hardware" currently runs at, read/written by the dummy ops below.
 */
struct clk_dummy_context {
	struct clk_hw hw;
	unsigned long rate;	/* current rate, in Hz */
};
23
24static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
25 unsigned long parent_rate)
26{
27 struct clk_dummy_context *ctx =
28 container_of(hw, struct clk_dummy_context, hw);
29
30 return ctx->rate;
31}
32
/* determine_rate implementation that accepts any requested rate as-is. */
static int clk_dummy_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	/* Just return the same rate without modifying it */
	return 0;
}
39
40static int clk_dummy_maximize_rate(struct clk_hw *hw,
41 struct clk_rate_request *req)
42{
43 /*
44 * If there's a maximum set, always run the clock at the maximum
45 * allowed.
46 */
47 if (req->max_rate < ULONG_MAX)
48 req->rate = req->max_rate;
49
50 return 0;
51}
52
53static int clk_dummy_minimize_rate(struct clk_hw *hw,
54 struct clk_rate_request *req)
55{
56 /*
57 * If there's a minimum set, always run the clock at the minimum
58 * allowed.
59 */
60 if (req->min_rate > 0)
61 req->rate = req->min_rate;
62
63 return 0;
64}
65
66static int clk_dummy_set_rate(struct clk_hw *hw,
67 unsigned long rate,
68 unsigned long parent_rate)
69{
70 struct clk_dummy_context *ctx =
71 container_of(hw, struct clk_dummy_context, hw);
72
73 ctx->rate = rate;
74 return 0;
75}
76
77static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
78{
79 if (index >= clk_hw_get_num_parents(hw))
80 return -EINVAL;
81
82 return 0;
83}
84
/* A single-parent clock always reports parent index 0. */
static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
{
	return 0;
}
89
/* Basic rate clock: accepts whatever rate is requested. */
static const struct clk_ops clk_dummy_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_determine_rate,
	.set_rate = clk_dummy_set_rate,
};

/* Rate clock that always rounds up to the maximum of the range, if any. */
static const struct clk_ops clk_dummy_maximize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_maximize_rate,
	.set_rate = clk_dummy_set_rate,
};

/* Rate clock that always rounds down to the minimum of the range, if any. */
static const struct clk_ops clk_dummy_minimize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_minimize_rate,
	.set_rate = clk_dummy_set_rate,
};
107
/* Mux-like clock with a single parent; rate requests go to the parent. */
static const struct clk_ops clk_dummy_single_parent_ops = {
	/*
	 * FIXME: Even though we should probably be able to use
	 * __clk_mux_determine_rate() here, if we use it and call
	 * clk_round_rate() or clk_set_rate() with a rate lower than
	 * what all the parents can provide, it will return -EINVAL.
	 *
	 * This is due to the fact that it has the undocumented
	 * behaviour to always pick up the closest rate higher than the
	 * requested rate. If we get something lower, it thus considers
	 * that it's not acceptable and will return an error.
	 *
	 * It's somewhat inconsistent and creates a weird threshold
	 * between rates above the parent rate which would be rounded to
	 * what the parent can provide, but rates below will simply
	 * return an error.
	 */
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};
129
/*
 * State for a two-parent mux clock: the two dummy parents, the mux's
 * own clk_hw and the index of the parent currently selected.
 */
struct clk_multiple_parent_ctx {
	struct clk_dummy_context parents_ctx[2];
	struct clk_hw hw;
	u8 current_parent;	/* index into parents_ctx */
};
135
136static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
137{
138 struct clk_multiple_parent_ctx *ctx =
139 container_of(hw, struct clk_multiple_parent_ctx, hw);
140
141 if (index >= clk_hw_get_num_parents(hw))
142 return -EINVAL;
143
144 ctx->current_parent = index;
145
146 return 0;
147}
148
149static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
150{
151 struct clk_multiple_parent_ctx *ctx =
152 container_of(hw, struct clk_multiple_parent_ctx, hw);
153
154 return ctx->current_parent;
155}
156
/* Two-parent mux that may reparent to reach a requested rate. */
static const struct clk_ops clk_multiple_parents_mux_ops = {
	.get_parent = clk_multiple_parents_mux_get_parent,
	.set_parent = clk_multiple_parents_mux_set_parent,
	.determine_rate = __clk_mux_determine_rate_closest,
};

/* Two-parent mux that must not reparent when determining a rate. */
static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_multiple_parents_mux_get_parent,
	.set_parent = clk_multiple_parents_mux_set_parent,
};
168
169static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
170{
171 struct clk_dummy_context *ctx;
172 struct clk_init_data init = { };
173 int ret;
174
175 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
176 if (!ctx)
177 return -ENOMEM;
178 ctx->rate = DUMMY_CLOCK_INIT_RATE;
179 test->priv = ctx;
180
181 init.name = "test_dummy_rate";
182 init.ops = ops;
183 ctx->hw.init = &init;
184
185 ret = clk_hw_register(NULL, &ctx->hw);
186 if (ret)
187 return ret;
188
189 return 0;
190}
191
/* Suite init: dummy clock whose determine_rate accepts any rate. */
static int clk_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
}

/* Suite init: dummy clock that always runs at the range maximum. */
static int clk_maximize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
}

/* Suite init: dummy clock that always runs at the range minimum. */
static int clk_minimize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
}
206
/* Suite exit: unregister the dummy clock set up by clk_test_init*(). */
static void clk_test_exit(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
}
213
214/*
215 * Test that the actual rate matches what is returned by clk_get_rate()
216 */
217static void clk_test_get_rate(struct kunit *test)
218{
219 struct clk_dummy_context *ctx = test->priv;
220 struct clk_hw *hw = &ctx->hw;
221 struct clk *clk = clk_hw_get_clk(hw, NULL);
222 unsigned long rate;
223
224 rate = clk_get_rate(clk);
225 KUNIT_ASSERT_GT(test, rate, 0);
226 KUNIT_EXPECT_EQ(test, rate, ctx->rate);
227
228 clk_put(clk);
229}
230
231/*
232 * Test that, after a call to clk_set_rate(), the rate returned by
233 * clk_get_rate() matches.
234 *
235 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
236 * modify the requested rate, which is our case in clk_dummy_rate_ops.
237 */
238static void clk_test_set_get_rate(struct kunit *test)
239{
240 struct clk_dummy_context *ctx = test->priv;
241 struct clk_hw *hw = &ctx->hw;
242 struct clk *clk = clk_hw_get_clk(hw, NULL);
243 unsigned long rate;
244
245 KUNIT_ASSERT_EQ(test,
246 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
247 0);
248
249 rate = clk_get_rate(clk);
250 KUNIT_ASSERT_GT(test, rate, 0);
251 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
252
253 clk_put(clk);
254}
255
256/*
257 * Test that, after several calls to clk_set_rate(), the rate returned
258 * by clk_get_rate() matches the last one.
259 *
260 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
261 * modify the requested rate, which is our case in clk_dummy_rate_ops.
262 */
263static void clk_test_set_set_get_rate(struct kunit *test)
264{
265 struct clk_dummy_context *ctx = test->priv;
266 struct clk_hw *hw = &ctx->hw;
267 struct clk *clk = clk_hw_get_clk(hw, NULL);
268 unsigned long rate;
269
270 KUNIT_ASSERT_EQ(test,
271 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
272 0);
273
274 KUNIT_ASSERT_EQ(test,
275 clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
276 0);
277
278 rate = clk_get_rate(clk);
279 KUNIT_ASSERT_GT(test, rate, 0);
280 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
281
282 clk_put(clk);
283}
284
/*
 * Test that clk_round_rate and clk_set_rate are consistent and will
 * return the same frequency.
 */
static void clk_test_round_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long set_rate;
	long rounded_rate;

	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_GT(test, rounded_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	/* What we actually got must be exactly what was rounded. */
	set_rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, set_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);

	clk_put(clk);
}
311
/* Cases exercising the rate consumer API on a basic rate clock. */
static struct kunit_case clk_test_cases[] = {
	KUNIT_CASE(clk_test_get_rate),
	KUNIT_CASE(clk_test_set_get_rate),
	KUNIT_CASE(clk_test_set_set_get_rate),
	KUNIT_CASE(clk_test_round_set_get_rate),
	{}
};

/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_test_suite = {
	.name = "clk-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_test_cases,
};
331
332static int clk_uncached_test_init(struct kunit *test)
333{
334 struct clk_dummy_context *ctx;
335 int ret;
336
337 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
338 if (!ctx)
339 return -ENOMEM;
340 test->priv = ctx;
341
342 ctx->rate = DUMMY_CLOCK_INIT_RATE;
343 ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
344 &clk_dummy_rate_ops,
345 CLK_GET_RATE_NOCACHE);
346
347 ret = clk_hw_register(NULL, &ctx->hw);
348 if (ret)
349 return ret;
350
351 return 0;
352}
353
354/*
355 * Test that for an uncached clock, the clock framework doesn't cache
356 * the rate and clk_get_rate() will return the underlying clock rate
357 * even if it changed.
358 */
359static void clk_test_uncached_get_rate(struct kunit *test)
360{
361 struct clk_dummy_context *ctx = test->priv;
362 struct clk_hw *hw = &ctx->hw;
363 struct clk *clk = clk_hw_get_clk(hw, NULL);
364 unsigned long rate;
365
366 rate = clk_get_rate(clk);
367 KUNIT_ASSERT_GT(test, rate, 0);
368 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
369
370 /* We change the rate behind the clock framework's back */
371 ctx->rate = DUMMY_CLOCK_RATE_1;
372 rate = clk_get_rate(clk);
373 KUNIT_ASSERT_GT(test, rate, 0);
374 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
375
376 clk_put(clk);
377}
378
379/*
380 * Test that for an uncached clock, clk_set_rate_range() will work
381 * properly if the rate hasn't changed.
382 */
383static void clk_test_uncached_set_range(struct kunit *test)
384{
385 struct clk_dummy_context *ctx = test->priv;
386 struct clk_hw *hw = &ctx->hw;
387 struct clk *clk = clk_hw_get_clk(hw, NULL);
388 unsigned long rate;
389
390 KUNIT_ASSERT_EQ(test,
391 clk_set_rate_range(clk,
392 DUMMY_CLOCK_RATE_1,
393 DUMMY_CLOCK_RATE_2),
394 0);
395
396 rate = clk_get_rate(clk);
397 KUNIT_ASSERT_GT(test, rate, 0);
398 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
399 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
400
401 clk_put(clk);
402}
403
404/*
405 * Test that for an uncached clock, clk_set_rate_range() will work
406 * properly if the rate has changed in hardware.
407 *
408 * In this case, it means that if the rate wasn't initially in the range
409 * we're trying to set, but got changed at some point into the range
410 * without the kernel knowing about it, its rate shouldn't be affected.
411 */
412static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
413{
414 struct clk_dummy_context *ctx = test->priv;
415 struct clk_hw *hw = &ctx->hw;
416 struct clk *clk = clk_hw_get_clk(hw, NULL);
417 unsigned long rate;
418
419 /* We change the rate behind the clock framework's back */
420 ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
421 KUNIT_ASSERT_EQ(test,
422 clk_set_rate_range(clk,
423 DUMMY_CLOCK_RATE_1,
424 DUMMY_CLOCK_RATE_2),
425 0);
426
427 rate = clk_get_rate(clk);
428 KUNIT_ASSERT_GT(test, rate, 0);
429 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
430
431 clk_put(clk);
432}
433
/* Cases exercising a clock flagged CLK_GET_RATE_NOCACHE. */
static struct kunit_case clk_uncached_test_cases[] = {
	KUNIT_CASE(clk_test_uncached_get_rate),
	KUNIT_CASE(clk_test_uncached_set_range),
	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
	{}
};

/*
 * Test suite for a basic, uncached, rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_uncached_test_suite = {
	.name = "clk-uncached-test",
	.init = clk_uncached_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_uncached_test_cases,
};
452
453static int
454clk_multiple_parents_mux_test_init(struct kunit *test)
455{
456 struct clk_multiple_parent_ctx *ctx;
457 const char *parents[2] = { "parent-0", "parent-1"};
458 int ret;
459
460 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
461 if (!ctx)
462 return -ENOMEM;
463 test->priv = ctx;
464
465 ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
466 &clk_dummy_rate_ops,
467 0);
468 ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
469 ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
470 if (ret)
471 return ret;
472
473 ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
474 &clk_dummy_rate_ops,
475 0);
476 ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
477 ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
478 if (ret)
479 return ret;
480
481 ctx->current_parent = 0;
482 ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
483 &clk_multiple_parents_mux_ops,
484 CLK_SET_RATE_PARENT);
485 ret = clk_hw_register(NULL, &ctx->hw);
486 if (ret)
487 return ret;
488
489 return 0;
490}
491
/* Suite exit: unregister the mux first, then its two parents. */
static void
clk_multiple_parents_mux_test_exit(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parents_ctx[0].hw);
	clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
501
502/*
503 * Test that for a clock with multiple parents, clk_get_parent()
504 * actually returns the current one.
505 */
506static void
507clk_test_multiple_parents_mux_get_parent(struct kunit *test)
508{
509 struct clk_multiple_parent_ctx *ctx = test->priv;
510 struct clk_hw *hw = &ctx->hw;
511 struct clk *clk = clk_hw_get_clk(hw, NULL);
512 struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
513
514 KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
515
516 clk_put(parent);
517 clk_put(clk);
518}
519
520/*
521 * Test that for a clock with a multiple parents, clk_has_parent()
522 * actually reports all of them as parents.
523 */
524static void
525clk_test_multiple_parents_mux_has_parent(struct kunit *test)
526{
527 struct clk_multiple_parent_ctx *ctx = test->priv;
528 struct clk_hw *hw = &ctx->hw;
529 struct clk *clk = clk_hw_get_clk(hw, NULL);
530 struct clk *parent;
531
532 parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
533 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
534 clk_put(parent);
535
536 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
537 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
538 clk_put(parent);
539
540 clk_put(clk);
541}
542
/*
 * Test that for a clock with a multiple parents, if we set a range on
 * that clock and the parent is changed, its rate after the reparenting
 * is still within the range we asked for.
 *
 * FIXME: clk_set_parent() only does the reparenting but doesn't
 * reevaluate whether the new clock rate is within its boundaries or
 * not.
 */
static void
clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent1, *parent2;
	unsigned long rate;
	int ret;

	/* Skipped until the core reevaluates ranges on reparent. */
	kunit_skip(test, "This needs to be fixed in the core.");

	parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));

	parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);

	/* parent1 is in range, parent2 deliberately out of it. */
	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = clk_set_rate_range(clk,
				 DUMMY_CLOCK_RATE_1 - 1000,
				 DUMMY_CLOCK_RATE_1 + 1000);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = clk_set_parent(clk, parent2);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Even after reparenting, the rate must stay within the range. */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

	clk_put(parent2);
	clk_put(parent1);
	clk_put(clk);
}
594
/* Cases exercising a two-parent mux with CLK_SET_RATE_PARENT. */
static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
	{}
};

/*
 * Test suite for a basic mux clock with two parents, with
 * CLK_SET_RATE_PARENT on the child.
 *
 * These tests exercise the consumer API and check that the state of the
 * child and parents are sane and consistent.
 */
static struct kunit_suite
clk_multiple_parents_mux_test_suite = {
	.name = "clk-multiple-parents-mux-test",
	.init = clk_multiple_parents_mux_test_init,
	.exit = clk_multiple_parents_mux_test_exit,
	.test_cases = clk_multiple_parents_mux_test_cases,
};
616
617static int
618clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
619{
620 struct clk_multiple_parent_ctx *ctx;
621 const char *parents[2] = { "missing-parent", "proper-parent"};
622 int ret;
623
624 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
625 if (!ctx)
626 return -ENOMEM;
627 test->priv = ctx;
628
629 ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
630 &clk_dummy_rate_ops,
631 0);
632 ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
633 ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
634 if (ret)
635 return ret;
636
637 ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
638 &clk_multiple_parents_mux_ops,
639 CLK_SET_RATE_PARENT);
640 ret = clk_hw_register(NULL, &ctx->hw);
641 if (ret)
642 return ret;
643
644 return 0;
645}
646
/* Suite exit: unregister the mux, then the only registered parent. */
static void
clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
655
656/*
657 * Test that, for a mux whose current parent hasn't been registered yet and is
658 * thus orphan, clk_get_parent() will return NULL.
659 */
660static void
661clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
662{
663 struct clk_multiple_parent_ctx *ctx = test->priv;
664 struct clk_hw *hw = &ctx->hw;
665 struct clk *clk = clk_hw_get_clk(hw, NULL);
666
667 KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
668
669 clk_put(clk);
670}
671
672/*
673 * Test that, for a mux whose current parent hasn't been registered yet,
674 * calling clk_set_parent() to a valid parent will properly update the
675 * mux parent and its orphan status.
676 */
677static void
678clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
679{
680 struct clk_multiple_parent_ctx *ctx = test->priv;
681 struct clk_hw *hw = &ctx->hw;
682 struct clk *clk = clk_hw_get_clk(hw, NULL);
683 struct clk *parent, *new_parent;
684 int ret;
685
686 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
687 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
688
689 ret = clk_set_parent(clk, parent);
690 KUNIT_ASSERT_EQ(test, ret, 0);
691
692 new_parent = clk_get_parent(clk);
693 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
694 KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
695
696 clk_put(parent);
697 clk_put(clk);
698}
699
700/*
701 * Test that, for a mux that started orphan but got switched to a valid
702 * parent, calling clk_drop_range() on the mux won't affect the parent
703 * rate.
704 */
705static void
706clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
707{
708 struct clk_multiple_parent_ctx *ctx = test->priv;
709 struct clk_hw *hw = &ctx->hw;
710 struct clk *clk = clk_hw_get_clk(hw, NULL);
711 struct clk *parent;
712 unsigned long parent_rate, new_parent_rate;
713 int ret;
714
715 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
716 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
717
718 parent_rate = clk_get_rate(parent);
719 KUNIT_ASSERT_GT(test, parent_rate, 0);
720
721 ret = clk_set_parent(clk, parent);
722 KUNIT_ASSERT_EQ(test, ret, 0);
723
724 ret = clk_drop_range(clk);
725 KUNIT_ASSERT_EQ(test, ret, 0);
726
727 new_parent_rate = clk_get_rate(clk);
728 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
729 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
730
731 clk_put(parent);
732 clk_put(clk);
733}
734
735/*
736 * Test that, for a mux that started orphan but got switched to a valid
737 * parent, the rate of the mux and its new parent are consistent.
738 */
739static void
740clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
741{
742 struct clk_multiple_parent_ctx *ctx = test->priv;
743 struct clk_hw *hw = &ctx->hw;
744 struct clk *clk = clk_hw_get_clk(hw, NULL);
745 struct clk *parent;
746 unsigned long parent_rate, rate;
747 int ret;
748
749 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
750 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
751
752 parent_rate = clk_get_rate(parent);
753 KUNIT_ASSERT_GT(test, parent_rate, 0);
754
755 ret = clk_set_parent(clk, parent);
756 KUNIT_ASSERT_EQ(test, ret, 0);
757
758 rate = clk_get_rate(clk);
759 KUNIT_ASSERT_GT(test, rate, 0);
760 KUNIT_EXPECT_EQ(test, parent_rate, rate);
761
762 clk_put(parent);
763 clk_put(clk);
764}
765
766/*
767 * Test that, for a mux that started orphan but got switched to a valid
768 * parent, calling clk_put() on the mux won't affect the parent rate.
769 */
770static void
771clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
772{
773 struct clk_multiple_parent_ctx *ctx = test->priv;
774 struct clk *clk, *parent;
775 unsigned long parent_rate, new_parent_rate;
776 int ret;
777
778 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
779 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
780
781 clk = clk_hw_get_clk(&ctx->hw, NULL);
782 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
783
784 parent_rate = clk_get_rate(parent);
785 KUNIT_ASSERT_GT(test, parent_rate, 0);
786
787 ret = clk_set_parent(clk, parent);
788 KUNIT_ASSERT_EQ(test, ret, 0);
789
790 clk_put(clk);
791
792 new_parent_rate = clk_get_rate(parent);
793 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
794 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
795
796 clk_put(parent);
797}
798
799/*
800 * Test that, for a mux that started orphan but got switched to a valid
801 * parent, calling clk_set_rate_range() will affect the parent state if
802 * its rate is out of range.
803 */
804static void
805clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
806{
807 struct clk_multiple_parent_ctx *ctx = test->priv;
808 struct clk_hw *hw = &ctx->hw;
809 struct clk *clk = clk_hw_get_clk(hw, NULL);
810 struct clk *parent;
811 unsigned long rate;
812 int ret;
813
814 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
815 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
816
817 ret = clk_set_parent(clk, parent);
818 KUNIT_ASSERT_EQ(test, ret, 0);
819
820 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
821 KUNIT_ASSERT_EQ(test, ret, 0);
822
823 rate = clk_get_rate(clk);
824 KUNIT_ASSERT_GT(test, rate, 0);
825 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
826 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
827
828 clk_put(parent);
829 clk_put(clk);
830}
831
832/*
833 * Test that, for a mux that started orphan but got switched to a valid
834 * parent, calling clk_set_rate_range() won't affect the parent state if
835 * its rate is within range.
836 */
837static void
838clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
839{
840 struct clk_multiple_parent_ctx *ctx = test->priv;
841 struct clk_hw *hw = &ctx->hw;
842 struct clk *clk = clk_hw_get_clk(hw, NULL);
843 struct clk *parent;
844 unsigned long parent_rate, new_parent_rate;
845 int ret;
846
847 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
848 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
849
850 parent_rate = clk_get_rate(parent);
851 KUNIT_ASSERT_GT(test, parent_rate, 0);
852
853 ret = clk_set_parent(clk, parent);
854 KUNIT_ASSERT_EQ(test, ret, 0);
855
856 ret = clk_set_rate_range(clk,
857 DUMMY_CLOCK_INIT_RATE - 1000,
858 DUMMY_CLOCK_INIT_RATE + 1000);
859 KUNIT_ASSERT_EQ(test, ret, 0);
860
861 new_parent_rate = clk_get_rate(parent);
862 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
863 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
864
865 clk_put(parent);
866 clk_put(clk);
867}
868
869/*
870 * Test that, for a mux whose current parent hasn't been registered yet,
871 * calling clk_set_rate_range() will succeed, and will be taken into
872 * account when rounding a rate.
873 */
874static void
875clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
876{
877 struct clk_multiple_parent_ctx *ctx = test->priv;
878 struct clk_hw *hw = &ctx->hw;
879 struct clk *clk = clk_hw_get_clk(hw, NULL);
880 long rate;
881 int ret;
882
883 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
884 KUNIT_ASSERT_EQ(test, ret, 0);
885
886 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
887 KUNIT_ASSERT_GT(test, rate, 0);
888 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
889 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
890
891 clk_put(clk);
892}
893
/*
 * Test that, for a mux that started orphan, was assigned and rate and
 * then got switched to a valid parent, its rate is eventually within
 * range.
 *
 * FIXME: Even though we update the rate as part of clk_set_parent(), we
 * don't evaluate whether that new rate is within range and needs to be
 * adjusted.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	unsigned long rate;
	int ret;

	/* Skipped until the core reevaluates ranges on reparent. */
	kunit_skip(test, "This needs to be fixed in the core.");

	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);

	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

	ret = clk_set_parent(clk, parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* After leaving orphanhood, the rate must respect the range. */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(parent);
	clk_put(clk);
}
931
/* Cases exercising a mux whose default parent is never registered. */
static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
	{}
};

/*
 * Test suite for a basic mux clock with two parents. The default parent
 * isn't registered, only the second parent is. By default, the clock
 * will thus be orphan.
 *
 * These tests exercise the behaviour of the consumer API when dealing
 * with an orphan clock, and how we deal with the transition to a valid
 * parent.
 */
static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
	.name = "clk-orphan-transparent-multiple-parent-mux-test",
	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
	.exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
};
960
/* State for a single-parent clock: the one dummy parent plus the child. */
struct clk_single_parent_ctx {
	struct clk_dummy_context parent_ctx;
	struct clk_hw hw;
};
965
966static int clk_single_parent_mux_test_init(struct kunit *test)
967{
968 struct clk_single_parent_ctx *ctx;
969 int ret;
970
971 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
972 if (!ctx)
973 return -ENOMEM;
974 test->priv = ctx;
975
976 ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
977 ctx->parent_ctx.hw.init =
978 CLK_HW_INIT_NO_PARENT("parent-clk",
979 &clk_dummy_rate_ops,
980 0);
981
982 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
983 if (ret)
984 return ret;
985
986 ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
987 &clk_dummy_single_parent_ops,
988 CLK_SET_RATE_PARENT);
989
990 ret = clk_hw_register(NULL, &ctx->hw);
991 if (ret)
992 return ret;
993
994 return 0;
995}
996
/* Suite exit: unregister the child clock first, then its parent. */
static void
clk_single_parent_mux_test_exit(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent_ctx.hw);
}
1005
1006/*
1007 * Test that for a clock with a single parent, clk_get_parent() actually
1008 * returns the parent.
1009 */
1010static void
1011clk_test_single_parent_mux_get_parent(struct kunit *test)
1012{
1013 struct clk_single_parent_ctx *ctx = test->priv;
1014 struct clk_hw *hw = &ctx->hw;
1015 struct clk *clk = clk_hw_get_clk(hw, NULL);
1016 struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1017
1018 KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
1019
1020 clk_put(parent);
1021 clk_put(clk);
1022}
1023
1024/*
1025 * Test that for a clock with a single parent, clk_has_parent() actually
1026 * reports it as a parent.
1027 */
1028static void
1029clk_test_single_parent_mux_has_parent(struct kunit *test)
1030{
1031 struct clk_single_parent_ctx *ctx = test->priv;
1032 struct clk_hw *hw = &ctx->hw;
1033 struct clk *clk = clk_hw_get_clk(hw, NULL);
1034 struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1035
1036 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1037
1038 clk_put(parent);
1039 clk_put(clk);
1040}
1041
/*
 * Test that for a clock that can't modify its rate and with a single
 * parent, if we set disjoints range on the parent and then the child,
 * the second will return an error.
 *
 * FIXME: clk_set_rate_range() only considers the current clock when
 * evaluating whether ranges are disjoints and not the upstream clocks
 * ranges.
 */
static void
clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	int ret;

	/* Skipped until the core checks ranges across the tree. */
	kunit_skip(test, "This needs to be fixed in the core.");

	parent = clk_get_parent(clk);
	KUNIT_ASSERT_PTR_NE(test, parent, NULL);

	ret = clk_set_rate_range(parent, 1000, 2000);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* [3000, 4000] cannot intersect the parent's [1000, 2000]. */
	ret = clk_set_rate_range(clk, 3000, 4000);
	KUNIT_EXPECT_LT(test, ret, 0);

	clk_put(clk);
}
1073
/*
 * Test that for a clock that can't modify its rate and with a single
 * parent, if we set disjoints range on the child and then the parent,
 * the second will return an error.
 *
 * FIXME: clk_set_rate_range() only considers the current clock when
 * evaluating whether ranges are disjoints and not the downstream clocks
 * ranges.
 */
static void
clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	int ret;

	/* Skipped until the core checks ranges across the tree. */
	kunit_skip(test, "This needs to be fixed in the core.");

	parent = clk_get_parent(clk);
	KUNIT_ASSERT_PTR_NE(test, parent, NULL);

	ret = clk_set_rate_range(clk, 1000, 2000);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* [3000, 4000] cannot intersect the child's [1000, 2000]. */
	ret = clk_set_rate_range(parent, 3000, 4000);
	KUNIT_EXPECT_LT(test, ret, 0);

	clk_put(clk);
}
1105
1106/*
1107 * Test that for a clock that can't modify its rate and with a single
1108 * parent, if we set a range on the parent and then call
1109 * clk_round_rate(), the boundaries of the parent are taken into
1110 * account.
1111 */
1112static void
1113clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1114{
1115 struct clk_single_parent_ctx *ctx = test->priv;
1116 struct clk_hw *hw = &ctx->hw;
1117 struct clk *clk = clk_hw_get_clk(hw, NULL);
1118 struct clk *parent;
1119 long rate;
1120 int ret;
1121
1122 parent = clk_get_parent(clk);
1123 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1124
1125 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1126 KUNIT_ASSERT_EQ(test, ret, 0);
1127
1128 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1129 KUNIT_ASSERT_GT(test, rate, 0);
1130 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1131 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1132
1133 clk_put(clk);
1134}
1135
1136/*
1137 * Test that for a clock that can't modify its rate and with a single
1138 * parent, if we set a range on the parent and a more restrictive one on
1139 * the child, and then call clk_round_rate(), the boundaries of the
1140 * two clocks are taken into account.
1141 */
1142static void
1143clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1144{
1145 struct clk_single_parent_ctx *ctx = test->priv;
1146 struct clk_hw *hw = &ctx->hw;
1147 struct clk *clk = clk_hw_get_clk(hw, NULL);
1148 struct clk *parent;
1149 long rate;
1150 int ret;
1151
1152 parent = clk_get_parent(clk);
1153 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1154
1155 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1156 KUNIT_ASSERT_EQ(test, ret, 0);
1157
1158 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1159 KUNIT_ASSERT_EQ(test, ret, 0);
1160
1161 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1162 KUNIT_ASSERT_GT(test, rate, 0);
1163 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1164 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1165
1166 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1167 KUNIT_ASSERT_GT(test, rate, 0);
1168 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1169 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1170
1171 clk_put(clk);
1172}
1173
1174/*
1175 * Test that for a clock that can't modify its rate and with a single
1176 * parent, if we set a range on the child and a more restrictive one on
1177 * the parent, and then call clk_round_rate(), the boundaries of the
1178 * two clocks are taken into account.
1179 */
1180static void
1181clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1182{
1183 struct clk_single_parent_ctx *ctx = test->priv;
1184 struct clk_hw *hw = &ctx->hw;
1185 struct clk *clk = clk_hw_get_clk(hw, NULL);
1186 struct clk *parent;
1187 long rate;
1188 int ret;
1189
1190 parent = clk_get_parent(clk);
1191 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1192
1193 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1194 KUNIT_ASSERT_EQ(test, ret, 0);
1195
1196 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1197 KUNIT_ASSERT_EQ(test, ret, 0);
1198
1199 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1200 KUNIT_ASSERT_GT(test, rate, 0);
1201 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1202 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1203
1204 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1205 KUNIT_ASSERT_GT(test, rate, 0);
1206 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1207 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1208
1209 clk_put(clk);
1210}
1211
/* Consumer-API checks for a mux clock with a single possible parent. */
static struct kunit_case clk_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
	{} /* sentinel */
};
1222
1223/*
1224 * Test suite for a basic mux clock with one parent, with
1225 * CLK_SET_RATE_PARENT on the child.
1226 *
1227 * These tests exercise the consumer API and check that the state of the
1228 * child and parent are sane and consistent.
1229 */
static struct kunit_suite
clk_single_parent_mux_test_suite = {
	.name = "clk-single-parent-mux-test",
	.init = clk_single_parent_mux_test_init,	/* per-test fixture setup */
	.exit = clk_single_parent_mux_test_exit,	/* per-test fixture teardown */
	.test_cases = clk_single_parent_mux_test_cases,
};
1237
1238static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1239{
1240 struct clk_single_parent_ctx *ctx;
1241 struct clk_init_data init = { };
1242 const char * const parents[] = { "orphan_parent" };
1243 int ret;
1244
1245 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1246 if (!ctx)
1247 return -ENOMEM;
1248 test->priv = ctx;
1249
1250 init.name = "test_orphan_dummy_parent";
1251 init.ops = &clk_dummy_single_parent_ops;
1252 init.parent_names = parents;
1253 init.num_parents = ARRAY_SIZE(parents);
1254 init.flags = CLK_SET_RATE_PARENT;
1255 ctx->hw.init = &init;
1256
1257 ret = clk_hw_register(NULL, &ctx->hw);
1258 if (ret)
1259 return ret;
1260
1261 memset(&init, 0, sizeof(init));
1262 init.name = "orphan_parent";
1263 init.ops = &clk_dummy_rate_ops;
1264 ctx->parent_ctx.hw.init = &init;
1265 ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1266
1267 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1268 if (ret)
1269 return ret;
1270
1271 return 0;
1272}
1273
1274/*
1275 * Test that a mux-only clock, with an initial rate within a range,
1276 * will still have the same rate after the range has been enforced.
1277 *
1278 * See:
1279 * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1280 */
1281static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1282{
1283 struct clk_single_parent_ctx *ctx = test->priv;
1284 struct clk_hw *hw = &ctx->hw;
1285 struct clk *clk = clk_hw_get_clk(hw, NULL);
1286 unsigned long rate, new_rate;
1287
1288 rate = clk_get_rate(clk);
1289 KUNIT_ASSERT_GT(test, rate, 0);
1290
1291 KUNIT_ASSERT_EQ(test,
1292 clk_set_rate_range(clk,
1293 ctx->parent_ctx.rate - 1000,
1294 ctx->parent_ctx.rate + 1000),
1295 0);
1296
1297 new_rate = clk_get_rate(clk);
1298 KUNIT_ASSERT_GT(test, new_rate, 0);
1299 KUNIT_EXPECT_EQ(test, rate, new_rate);
1300
1301 clk_put(clk);
1302}
1303
/* Single case: rate-range enforcement on a formerly-orphan mux. */
static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
	{} /* sentinel */
};
1308
1309/*
1310 * Test suite for a basic mux clock with one parent. The parent is
1311 * registered after its child. The clock will thus be an orphan when
1312 * registered, but will no longer be when the tests run.
1313 *
1314 * These tests make sure a clock that used to be orphan has a sane,
1315 * consistent, behaviour.
1316 */
static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
	.name = "clk-orphan-transparent-single-parent-test",
	.init = clk_orphan_transparent_single_parent_mux_test_init,	/* child first, parent last */
	.exit = clk_single_parent_mux_test_exit,	/* shared with the non-orphan suite */
	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};
1323
/* Fixture for a three-level chain: root -> intermediate -> leaf (hw). */
struct clk_single_parent_two_lvl_ctx {
	struct clk_dummy_context parent_parent_ctx;	/* "root-parent" */
	struct clk_dummy_context parent_ctx;		/* "intermediate-parent" */
	struct clk_hw hw;				/* "test-clk" leaf */
};
1329
1330static int
1331clk_orphan_two_level_root_last_test_init(struct kunit *test)
1332{
1333 struct clk_single_parent_two_lvl_ctx *ctx;
1334 int ret;
1335
1336 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1337 if (!ctx)
1338 return -ENOMEM;
1339 test->priv = ctx;
1340
1341 ctx->parent_ctx.hw.init =
1342 CLK_HW_INIT("intermediate-parent",
1343 "root-parent",
1344 &clk_dummy_single_parent_ops,
1345 CLK_SET_RATE_PARENT);
1346 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1347 if (ret)
1348 return ret;
1349
1350 ctx->hw.init =
1351 CLK_HW_INIT("test-clk", "intermediate-parent",
1352 &clk_dummy_single_parent_ops,
1353 CLK_SET_RATE_PARENT);
1354 ret = clk_hw_register(NULL, &ctx->hw);
1355 if (ret)
1356 return ret;
1357
1358 ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1359 ctx->parent_parent_ctx.hw.init =
1360 CLK_HW_INIT_NO_PARENT("root-parent",
1361 &clk_dummy_rate_ops,
1362 0);
1363 ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1364 if (ret)
1365 return ret;
1366
1367 return 0;
1368}
1369
/* Unregister the whole chain, leaf-first so children go before parents. */
static void
clk_orphan_two_level_root_last_test_exit(struct kunit *test)
{
	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent_ctx.hw);
	clk_hw_unregister(&ctx->parent_parent_ctx.hw);
}
1379
1380/*
1381 * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1382 * will return the proper rate.
1383 */
1384static void
1385clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1386{
1387 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1388 struct clk_hw *hw = &ctx->hw;
1389 struct clk *clk = clk_hw_get_clk(hw, NULL);
1390 unsigned long rate;
1391
1392 rate = clk_get_rate(clk);
1393 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1394
1395 clk_put(clk);
1396}
1397
1398/*
1399 * Test that, for a clock whose parent used to be orphan,
1400 * clk_set_rate_range() won't affect its rate if it is already within
1401 * range.
1402 *
1403 * See (for Exynos 4210):
1404 * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1405 */
1406static void
1407clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1408{
1409 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1410 struct clk_hw *hw = &ctx->hw;
1411 struct clk *clk = clk_hw_get_clk(hw, NULL);
1412 unsigned long rate;
1413 int ret;
1414
1415 ret = clk_set_rate_range(clk,
1416 DUMMY_CLOCK_INIT_RATE - 1000,
1417 DUMMY_CLOCK_INIT_RATE + 1000);
1418 KUNIT_ASSERT_EQ(test, ret, 0);
1419
1420 rate = clk_get_rate(clk);
1421 KUNIT_ASSERT_GT(test, rate, 0);
1422 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1423
1424 clk_put(clk);
1425}
1426
/* Rate propagation checks through the formerly-orphan two-level chain. */
static struct kunit_case
clk_orphan_two_level_root_last_test_cases[] = {
	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
	{} /* sentinel */
};
1433
1434/*
1435 * Test suite for a basic, transparent, clock with a parent that is also
1436 * such a clock. The parent's parent is registered last, while the
1437 * parent and its child are registered in that order. The intermediate
1438 * and leaf clocks will thus be orphan when registered, but the leaf
1439 * clock itself will always have its parent and will never be
1440 * reparented. Indeed, it's only orphan because its parent is.
1441 *
1442 * These tests exercise the behaviour of the consumer API when dealing
1443 * with an orphan clock, and how we deal with the transition to a valid
1444 * parent.
1445 */
static struct kunit_suite
clk_orphan_two_level_root_last_test_suite = {
	.name = "clk-orphan-two-level-root-last-test",
	.init = clk_orphan_two_level_root_last_test_init,	/* root registered last */
	.exit = clk_orphan_two_level_root_last_test_exit,	/* leaf-first teardown */
	.test_cases = clk_orphan_two_level_root_last_test_cases,
};
1453
1454/*
1455 * Test that clk_set_rate_range won't return an error for a valid range
1456 * and that it will make sure the rate of the clock is within the
1457 * boundaries.
1458 */
1459static void clk_range_test_set_range(struct kunit *test)
1460{
1461 struct clk_dummy_context *ctx = test->priv;
1462 struct clk_hw *hw = &ctx->hw;
1463 struct clk *clk = clk_hw_get_clk(hw, NULL);
1464 unsigned long rate;
1465
1466 KUNIT_ASSERT_EQ(test,
1467 clk_set_rate_range(clk,
1468 DUMMY_CLOCK_RATE_1,
1469 DUMMY_CLOCK_RATE_2),
1470 0);
1471
1472 rate = clk_get_rate(clk);
1473 KUNIT_ASSERT_GT(test, rate, 0);
1474 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1475 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1476
1477 clk_put(clk);
1478}
1479
1480/*
1481 * Test that calling clk_set_rate_range with a minimum rate higher than
1482 * the maximum rate returns an error.
1483 */
1484static void clk_range_test_set_range_invalid(struct kunit *test)
1485{
1486 struct clk_dummy_context *ctx = test->priv;
1487 struct clk_hw *hw = &ctx->hw;
1488 struct clk *clk = clk_hw_get_clk(hw, NULL);
1489
1490 KUNIT_EXPECT_LT(test,
1491 clk_set_rate_range(clk,
1492 DUMMY_CLOCK_RATE_1 + 1000,
1493 DUMMY_CLOCK_RATE_1),
1494 0);
1495
1496 clk_put(clk);
1497}
1498
1499/*
1500 * Test that users can't set multiple, disjoints, range that would be
1501 * impossible to meet.
1502 */
1503static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1504{
1505 struct clk_dummy_context *ctx = test->priv;
1506 struct clk_hw *hw = &ctx->hw;
1507 struct clk *user1, *user2;
1508
1509 user1 = clk_hw_get_clk(hw, NULL);
1510 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1511
1512 user2 = clk_hw_get_clk(hw, NULL);
1513 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1514
1515 KUNIT_ASSERT_EQ(test,
1516 clk_set_rate_range(user1, 1000, 2000),
1517 0);
1518
1519 KUNIT_EXPECT_LT(test,
1520 clk_set_rate_range(user2, 3000, 4000),
1521 0);
1522
1523 clk_put(user2);
1524 clk_put(user1);
1525}
1526
1527/*
1528 * Test that if our clock has some boundaries and we try to round a rate
1529 * lower than the minimum, the returned rate will be within range.
1530 */
1531static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1532{
1533 struct clk_dummy_context *ctx = test->priv;
1534 struct clk_hw *hw = &ctx->hw;
1535 struct clk *clk = clk_hw_get_clk(hw, NULL);
1536 long rate;
1537
1538 KUNIT_ASSERT_EQ(test,
1539 clk_set_rate_range(clk,
1540 DUMMY_CLOCK_RATE_1,
1541 DUMMY_CLOCK_RATE_2),
1542 0);
1543
1544 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1545 KUNIT_ASSERT_GT(test, rate, 0);
1546 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1547 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1548
1549 clk_put(clk);
1550}
1551
1552/*
1553 * Test that if our clock has some boundaries and we try to set a rate
1554 * higher than the maximum, the new rate will be within range.
1555 */
1556static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1557{
1558 struct clk_dummy_context *ctx = test->priv;
1559 struct clk_hw *hw = &ctx->hw;
1560 struct clk *clk = clk_hw_get_clk(hw, NULL);
1561 unsigned long rate;
1562
1563 KUNIT_ASSERT_EQ(test,
1564 clk_set_rate_range(clk,
1565 DUMMY_CLOCK_RATE_1,
1566 DUMMY_CLOCK_RATE_2),
1567 0);
1568
1569 KUNIT_ASSERT_EQ(test,
1570 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1571 0);
1572
1573 rate = clk_get_rate(clk);
1574 KUNIT_ASSERT_GT(test, rate, 0);
1575 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1576 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1577
1578 clk_put(clk);
1579}
1580
1581/*
1582 * Test that if our clock has some boundaries and we try to round and
1583 * set a rate lower than the minimum, the rate returned by
1584 * clk_round_rate() will be consistent with the new rate set by
1585 * clk_set_rate().
1586 */
1587static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1588{
1589 struct clk_dummy_context *ctx = test->priv;
1590 struct clk_hw *hw = &ctx->hw;
1591 struct clk *clk = clk_hw_get_clk(hw, NULL);
1592 long rounded;
1593
1594 KUNIT_ASSERT_EQ(test,
1595 clk_set_rate_range(clk,
1596 DUMMY_CLOCK_RATE_1,
1597 DUMMY_CLOCK_RATE_2),
1598 0);
1599
1600 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1601 KUNIT_ASSERT_GT(test, rounded, 0);
1602
1603 KUNIT_ASSERT_EQ(test,
1604 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1605 0);
1606
1607 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1608
1609 clk_put(clk);
1610}
1611
1612/*
1613 * Test that if our clock has some boundaries and we try to round a rate
1614 * higher than the maximum, the returned rate will be within range.
1615 */
1616static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1617{
1618 struct clk_dummy_context *ctx = test->priv;
1619 struct clk_hw *hw = &ctx->hw;
1620 struct clk *clk = clk_hw_get_clk(hw, NULL);
1621 long rate;
1622
1623 KUNIT_ASSERT_EQ(test,
1624 clk_set_rate_range(clk,
1625 DUMMY_CLOCK_RATE_1,
1626 DUMMY_CLOCK_RATE_2),
1627 0);
1628
1629 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1630 KUNIT_ASSERT_GT(test, rate, 0);
1631 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1632 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1633
1634 clk_put(clk);
1635}
1636
1637/*
1638 * Test that if our clock has some boundaries and we try to set a rate
1639 * higher than the maximum, the new rate will be within range.
1640 */
1641static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1642{
1643 struct clk_dummy_context *ctx = test->priv;
1644 struct clk_hw *hw = &ctx->hw;
1645 struct clk *clk = clk_hw_get_clk(hw, NULL);
1646 unsigned long rate;
1647
1648 KUNIT_ASSERT_EQ(test,
1649 clk_set_rate_range(clk,
1650 DUMMY_CLOCK_RATE_1,
1651 DUMMY_CLOCK_RATE_2),
1652 0);
1653
1654 KUNIT_ASSERT_EQ(test,
1655 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1656 0);
1657
1658 rate = clk_get_rate(clk);
1659 KUNIT_ASSERT_GT(test, rate, 0);
1660 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1661 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1662
1663 clk_put(clk);
1664}
1665
1666/*
1667 * Test that if our clock has some boundaries and we try to round and
1668 * set a rate higher than the maximum, the rate returned by
1669 * clk_round_rate() will be consistent with the new rate set by
1670 * clk_set_rate().
1671 */
1672static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1673{
1674 struct clk_dummy_context *ctx = test->priv;
1675 struct clk_hw *hw = &ctx->hw;
1676 struct clk *clk = clk_hw_get_clk(hw, NULL);
1677 long rounded;
1678
1679 KUNIT_ASSERT_EQ(test,
1680 clk_set_rate_range(clk,
1681 DUMMY_CLOCK_RATE_1,
1682 DUMMY_CLOCK_RATE_2),
1683 0);
1684
1685 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1686 KUNIT_ASSERT_GT(test, rounded, 0);
1687
1688 KUNIT_ASSERT_EQ(test,
1689 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1690 0);
1691
1692 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1693
1694 clk_put(clk);
1695}
1696
1697/*
1698 * Test that if our clock has a rate lower than the minimum set by a
1699 * call to clk_set_rate_range(), the rate will be raised to match the
1700 * new minimum.
1701 *
1702 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1703 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1704 */
1705static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1706{
1707 struct clk_dummy_context *ctx = test->priv;
1708 struct clk_hw *hw = &ctx->hw;
1709 struct clk *clk = clk_hw_get_clk(hw, NULL);
1710 unsigned long rate;
1711
1712 KUNIT_ASSERT_EQ(test,
1713 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1714 0);
1715
1716 KUNIT_ASSERT_EQ(test,
1717 clk_set_rate_range(clk,
1718 DUMMY_CLOCK_RATE_1,
1719 DUMMY_CLOCK_RATE_2),
1720 0);
1721
1722 rate = clk_get_rate(clk);
1723 KUNIT_ASSERT_GT(test, rate, 0);
1724 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1725
1726 clk_put(clk);
1727}
1728
1729/*
1730 * Test that if our clock has a rate higher than the maximum set by a
1731 * call to clk_set_rate_range(), the rate will be lowered to match the
1732 * new maximum.
1733 *
1734 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1735 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1736 */
1737static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1738{
1739 struct clk_dummy_context *ctx = test->priv;
1740 struct clk_hw *hw = &ctx->hw;
1741 struct clk *clk = clk_hw_get_clk(hw, NULL);
1742 unsigned long rate;
1743
1744 KUNIT_ASSERT_EQ(test,
1745 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1746 0);
1747
1748 KUNIT_ASSERT_EQ(test,
1749 clk_set_rate_range(clk,
1750 DUMMY_CLOCK_RATE_1,
1751 DUMMY_CLOCK_RATE_2),
1752 0);
1753
1754 rate = clk_get_rate(clk);
1755 KUNIT_ASSERT_GT(test, rate, 0);
1756 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1757
1758 clk_put(clk);
1759}
1760
/* Rate-range API cases for a parentless rate clock. */
static struct kunit_case clk_range_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range),
	KUNIT_CASE(clk_range_test_set_range_invalid),
	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
	{} /* sentinel */
};
1775
1776/*
1777 * Test suite for a basic rate clock, without any parent.
1778 *
1779 * These tests exercise the rate range API: clk_set_rate_range(),
1780 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1781 */
static struct kunit_suite clk_range_test_suite = {
	.name = "clk-range-test",
	.init = clk_test_init,		/* per-test fixture setup */
	.exit = clk_test_exit,		/* per-test fixture teardown */
	.test_cases = clk_range_test_cases,
};
1788
1789/*
1790 * Test that if we have several subsequent calls to
1791 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1792 * needed each and every time.
1793 *
1794 * With clk_dummy_maximize_rate_ops, this means that the rate will
1795 * trail along the maximum as it evolves.
1796 */
1797static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1798{
1799 struct clk_dummy_context *ctx = test->priv;
1800 struct clk_hw *hw = &ctx->hw;
1801 struct clk *clk = clk_hw_get_clk(hw, NULL);
1802 unsigned long rate;
1803
1804 KUNIT_ASSERT_EQ(test,
1805 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1806 0);
1807
1808 KUNIT_ASSERT_EQ(test,
1809 clk_set_rate_range(clk,
1810 DUMMY_CLOCK_RATE_1,
1811 DUMMY_CLOCK_RATE_2),
1812 0);
1813
1814 rate = clk_get_rate(clk);
1815 KUNIT_ASSERT_GT(test, rate, 0);
1816 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1817
1818 KUNIT_ASSERT_EQ(test,
1819 clk_set_rate_range(clk,
1820 DUMMY_CLOCK_RATE_1,
1821 DUMMY_CLOCK_RATE_2 - 1000),
1822 0);
1823
1824 rate = clk_get_rate(clk);
1825 KUNIT_ASSERT_GT(test, rate, 0);
1826 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1827
1828 KUNIT_ASSERT_EQ(test,
1829 clk_set_rate_range(clk,
1830 DUMMY_CLOCK_RATE_1,
1831 DUMMY_CLOCK_RATE_2),
1832 0);
1833
1834 rate = clk_get_rate(clk);
1835 KUNIT_ASSERT_GT(test, rate, 0);
1836 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1837
1838 clk_put(clk);
1839}
1840
1841/*
1842 * Test that if we have several subsequent calls to
1843 * clk_set_rate_range(), across multiple users, the core will reevaluate
1844 * whether a new rate is needed each and every time.
1845 *
1846 * With clk_dummy_maximize_rate_ops, this means that the rate will
1847 * trail along the maximum as it evolves.
1848 */
1849static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1850{
1851 struct clk_dummy_context *ctx = test->priv;
1852 struct clk_hw *hw = &ctx->hw;
1853 struct clk *clk = clk_hw_get_clk(hw, NULL);
1854 struct clk *user1, *user2;
1855 unsigned long rate;
1856
1857 user1 = clk_hw_get_clk(hw, NULL);
1858 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1859
1860 user2 = clk_hw_get_clk(hw, NULL);
1861 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1862
1863 KUNIT_ASSERT_EQ(test,
1864 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1865 0);
1866
1867 KUNIT_ASSERT_EQ(test,
1868 clk_set_rate_range(user1,
1869 0,
1870 DUMMY_CLOCK_RATE_2),
1871 0);
1872
1873 rate = clk_get_rate(clk);
1874 KUNIT_ASSERT_GT(test, rate, 0);
1875 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1876
1877 KUNIT_ASSERT_EQ(test,
1878 clk_set_rate_range(user2,
1879 0,
1880 DUMMY_CLOCK_RATE_1),
1881 0);
1882
1883 rate = clk_get_rate(clk);
1884 KUNIT_ASSERT_GT(test, rate, 0);
1885 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1886
1887 KUNIT_ASSERT_EQ(test,
1888 clk_drop_range(user2),
1889 0);
1890
1891 rate = clk_get_rate(clk);
1892 KUNIT_ASSERT_GT(test, rate, 0);
1893 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1894
1895 clk_put(user2);
1896 clk_put(user1);
1897 clk_put(clk);
1898}
1899
1900/*
1901 * Test that if we have several subsequent calls to
1902 * clk_set_rate_range(), across multiple users, the core will reevaluate
1903 * whether a new rate is needed, including when a user drop its clock.
1904 *
1905 * With clk_dummy_maximize_rate_ops, this means that the rate will
1906 * trail along the maximum as it evolves.
1907 */
1908static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1909{
1910 struct clk_dummy_context *ctx = test->priv;
1911 struct clk_hw *hw = &ctx->hw;
1912 struct clk *clk = clk_hw_get_clk(hw, NULL);
1913 struct clk *user1, *user2;
1914 unsigned long rate;
1915
1916 user1 = clk_hw_get_clk(hw, NULL);
1917 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1918
1919 user2 = clk_hw_get_clk(hw, NULL);
1920 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1921
1922 KUNIT_ASSERT_EQ(test,
1923 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1924 0);
1925
1926 KUNIT_ASSERT_EQ(test,
1927 clk_set_rate_range(user1,
1928 0,
1929 DUMMY_CLOCK_RATE_2),
1930 0);
1931
1932 rate = clk_get_rate(clk);
1933 KUNIT_ASSERT_GT(test, rate, 0);
1934 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1935
1936 KUNIT_ASSERT_EQ(test,
1937 clk_set_rate_range(user2,
1938 0,
1939 DUMMY_CLOCK_RATE_1),
1940 0);
1941
1942 rate = clk_get_rate(clk);
1943 KUNIT_ASSERT_GT(test, rate, 0);
1944 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1945
1946 clk_put(user2);
1947
1948 rate = clk_get_rate(clk);
1949 KUNIT_ASSERT_GT(test, rate, 0);
1950 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1951
1952 clk_put(user1);
1953 clk_put(clk);
1954}
1955
/* Rate-range cases driven by a maximizing determine_rate implementation. */
static struct kunit_case clk_range_maximize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
	{} /* sentinel */
};
1962
1963/*
1964 * Test suite for a basic rate clock, without any parent.
1965 *
1966 * These tests exercise the rate range API: clk_set_rate_range(),
1967 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1968 * driver that will always try to run at the highest possible rate.
1969 */
static struct kunit_suite clk_range_maximize_test_suite = {
	.name = "clk-range-maximize-test",
	.init = clk_maximize_test_init,		/* fixture with maximizing rate ops */
	.exit = clk_test_exit,			/* shared teardown */
	.test_cases = clk_range_maximize_test_cases,
};
1976
1977/*
1978 * Test that if we have several subsequent calls to
1979 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1980 * needed each and every time.
1981 *
1982 * With clk_dummy_minimize_rate_ops, this means that the rate will
1983 * trail along the minimum as it evolves.
1984 */
1985static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1986{
1987 struct clk_dummy_context *ctx = test->priv;
1988 struct clk_hw *hw = &ctx->hw;
1989 struct clk *clk = clk_hw_get_clk(hw, NULL);
1990 unsigned long rate;
1991
1992 KUNIT_ASSERT_EQ(test,
1993 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1994 0);
1995
1996 KUNIT_ASSERT_EQ(test,
1997 clk_set_rate_range(clk,
1998 DUMMY_CLOCK_RATE_1,
1999 DUMMY_CLOCK_RATE_2),
2000 0);
2001
2002 rate = clk_get_rate(clk);
2003 KUNIT_ASSERT_GT(test, rate, 0);
2004 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2005
2006 KUNIT_ASSERT_EQ(test,
2007 clk_set_rate_range(clk,
2008 DUMMY_CLOCK_RATE_1 + 1000,
2009 DUMMY_CLOCK_RATE_2),
2010 0);
2011
2012 rate = clk_get_rate(clk);
2013 KUNIT_ASSERT_GT(test, rate, 0);
2014 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
2015
2016 KUNIT_ASSERT_EQ(test,
2017 clk_set_rate_range(clk,
2018 DUMMY_CLOCK_RATE_1,
2019 DUMMY_CLOCK_RATE_2),
2020 0);
2021
2022 rate = clk_get_rate(clk);
2023 KUNIT_ASSERT_GT(test, rate, 0);
2024 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2025
2026 clk_put(clk);
2027}
2028
2029/*
2030 * Test that if we have several subsequent calls to
2031 * clk_set_rate_range(), across multiple users, the core will reevaluate
2032 * whether a new rate is needed each and every time.
2033 *
2034 * With clk_dummy_minimize_rate_ops, this means that the rate will
2035 * trail along the minimum as it evolves.
2036 */
2037static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2038{
2039 struct clk_dummy_context *ctx = test->priv;
2040 struct clk_hw *hw = &ctx->hw;
2041 struct clk *clk = clk_hw_get_clk(hw, NULL);
2042 struct clk *user1, *user2;
2043 unsigned long rate;
2044
2045 user1 = clk_hw_get_clk(hw, NULL);
2046 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2047
2048 user2 = clk_hw_get_clk(hw, NULL);
2049 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2050
2051 KUNIT_ASSERT_EQ(test,
2052 clk_set_rate_range(user1,
2053 DUMMY_CLOCK_RATE_1,
2054 ULONG_MAX),
2055 0);
2056
2057 rate = clk_get_rate(clk);
2058 KUNIT_ASSERT_GT(test, rate, 0);
2059 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2060
2061 KUNIT_ASSERT_EQ(test,
2062 clk_set_rate_range(user2,
2063 DUMMY_CLOCK_RATE_2,
2064 ULONG_MAX),
2065 0);
2066
2067 rate = clk_get_rate(clk);
2068 KUNIT_ASSERT_GT(test, rate, 0);
2069 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2070
2071 KUNIT_ASSERT_EQ(test,
2072 clk_drop_range(user2),
2073 0);
2074
2075 rate = clk_get_rate(clk);
2076 KUNIT_ASSERT_GT(test, rate, 0);
2077 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2078
2079 clk_put(user2);
2080 clk_put(user1);
2081 clk_put(clk);
2082}
2083
/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
 *
 * With clk_dummy_minimize_rate_ops, this means that the rate will
 * trail along the minimum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	/* Two independent users of the same clock, each with its own range */
	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   DUMMY_CLOCK_RATE_1,
					   ULONG_MAX),
			0);

	/* The driver minimizes, so the rate sits at user1's minimum */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   DUMMY_CLOCK_RATE_2,
					   ULONG_MAX),
			0);

	/* user2's higher minimum now dominates the aggregated range */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	/* Dropping user2 entirely must make the core fall back to user1's minimum */
	clk_put(user2);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(user1);
	clk_put(clk);
}
2135
/* Test cases exercising rate ranges with a rate-minimizing driver */
static struct kunit_case clk_range_minimize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
	{}
};
2142
/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate range API: clk_set_rate_range(),
 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
 * driver that will always try to run at the lowest possible rate.
 */
static struct kunit_suite clk_range_minimize_test_suite = {
	.name = "clk-range-minimize-test",
	.init = clk_minimize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_minimize_test_cases,
};
2156
/*
 * Context for the leaf-mux tests: a leaf clock (hw) whose parent
 * (parent) is a pass-through on top of a mux (mux_ctx.hw).
 */
struct clk_leaf_mux_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk_hw hw;
	struct clk_hw parent;
	/* Scratch request filled in by clk_leaf_mux_determine_rate() for inspection */
	struct clk_rate_request *req;
	/* Which determine-rate helper the parametrized test exercises */
	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
};
2164
/*
 * Forward the rate request to the parent using the helper selected by
 * the test case, stashing the forwarded request in ctx->req so the test
 * can inspect what the helper left behind (notably best_parent_hw).
 */
static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw);
	int ret;
	struct clk_rate_request *parent_req = ctx->req;

	clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate);
	ret = ctx->determine_rate_func(req->best_parent_hw, parent_req);
	if (ret)
		return ret;

	/* Adopt whatever rate the parent settled on */
	req->rate = parent_req->rate;

	return 0;
}
2180
/* Leaf clock ops: single fixed parent, rate fully delegated to the parent */
static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = {
	.determine_rate = clk_leaf_mux_determine_rate,
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};
2186
/*
 * Build the test hierarchy:
 *
 *   parent-0 (RATE_1) --\
 *                        test-mux -- test-parent -- test-clock
 *   parent-1 (RATE_2) --/
 *
 * test-parent and test-clock both carry CLK_SET_RATE_PARENT so rate
 * requests propagate down to the mux.
 *
 * NOTE(review): on a mid-sequence registration failure the already
 * registered clk_hws are not unregistered; KUnit does not call exit()
 * after a failed init(), so those registrations are leaked — confirm
 * this is acceptable for the test harness.
 */
static int
clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx;
	const char *top_parents[2] = { "parent-0", "parent-1" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.current_parent = 0;
	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
						   &clk_multiple_parents_mux_ops,
						   0);
	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
	if (ret)
		return ret;

	/* Pass-through clock between the leaf and the mux */
	ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw,
					  &empty_clk_ops, CLK_SET_RATE_PARENT);
	ret = clk_hw_register(NULL, &ctx->parent);
	if (ret)
		return ret;

	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent,
				      &clk_leaf_mux_set_rate_parent_ops,
				      CLK_SET_RATE_PARENT);
	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}
2238
/* Tear down the hierarchy, leaves first (reverse of registration order) */
static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent);
	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
2249
/* One parametrized case: a human-readable name plus the helper under test */
struct clk_leaf_mux_set_rate_parent_determine_rate_test_case {
	const char *desc;
	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
};
2254
2255static void
2256clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc(
2257 const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc)
2258{
2259 strcpy(desc, t->desc);
2260}
2261
/* Every generic determine-rate helper must leave best_parent_hw untouched */
static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case
clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = {
	{
		/*
		 * Test that __clk_determine_rate() on the parent that can't
		 * change rate doesn't return a clk_rate_request structure with
		 * the best_parent_hw pointer pointing to the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent",
		.determine_rate_func = __clk_determine_rate,
	},
	{
		/*
		 * Test that __clk_mux_determine_rate() on the parent that
		 * can't change rate doesn't return a clk_rate_request
		 * structure with the best_parent_hw pointer pointing to
		 * the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent",
		.determine_rate_func = __clk_mux_determine_rate,
	},
	{
		/*
		 * Test that __clk_mux_determine_rate_closest() on the parent
		 * that can't change rate doesn't return a clk_rate_request
		 * structure with the best_parent_hw pointer pointing to
		 * the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent",
		.determine_rate_func = __clk_mux_determine_rate_closest,
	},
	{
		/*
		 * Test that clk_hw_determine_rate_no_reparent() on the parent
		 * that can't change rate doesn't return a clk_rate_request
		 * structure with the best_parent_hw pointer pointing to
		 * the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent",
		.determine_rate_func = clk_hw_determine_rate_no_reparent,
	},
};

/* Generates clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params() */
KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
		  clk_leaf_mux_set_rate_parent_determine_rate_test_cases,
		  clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc)
2308
/*
 * Test that when a clk that can't change rate itself calls a function like
 * __clk_determine_rate() on its parent it doesn't get back a clk_rate_request
 * structure that has the best_parent_hw pointer point to the clk_hw passed
 * into the determine rate function. See commit 262ca38f4b6e ("clk: Stop
 * forwarding clk_rate_requests to the parent") for more background.
 */
static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk_rate_request req;
	unsigned long rate;
	const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param;

	test_param = test->param_value;
	ctx->determine_rate_func = test_param->determine_rate_func;

	/* Let the leaf's determine_rate callback fill req for us */
	ctx->req = &req;
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2));

	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
	/* The key check: best_parent_hw must point at the mux, not at the callee */
	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);

	clk_put(clk);
}
2339
/* Parametrized over every generic determine-rate helper */
static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
	KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
			 clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params),
	{}
};
2345
/*
 * Test suite for a clock whose parent is a pass-through clk whose parent is a
 * mux with multiple parents. The leaf and pass-through clocks have the
 * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which
 * will then select which parent is the best fit for a given rate.
 *
 * These tests exercise the behaviour of muxes, and the proper selection
 * of parents.
 */
static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
	.name = "clk-leaf-mux-set-rate-parent",
	.init = clk_leaf_mux_set_rate_parent_test_init,
	.exit = clk_leaf_mux_set_rate_parent_test_exit,
	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
};
2361
/* One recorded rate-change notification, plus a waitqueue to await it */
struct clk_mux_notifier_rate_change {
	bool done;
	unsigned long old_rate;
	unsigned long new_rate;
	wait_queue_head_t wq;
};

/* Context for the mux notifier tests: the mux plus pre/post notification logs */
struct clk_mux_notifier_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk *clk;
	struct notifier_block clk_nb;
	struct clk_mux_notifier_rate_change pre_rate_change;
	struct clk_mux_notifier_rate_change post_rate_change;
};
2376
2377#define NOTIFIER_TIMEOUT_MS 100
2378
2379static int clk_mux_notifier_callback(struct notifier_block *nb,
2380 unsigned long action, void *data)
2381{
2382 struct clk_notifier_data *clk_data = data;
2383 struct clk_mux_notifier_ctx *ctx = container_of(nb,
2384 struct clk_mux_notifier_ctx,
2385 clk_nb);
2386
2387 if (action & PRE_RATE_CHANGE) {
2388 ctx->pre_rate_change.old_rate = clk_data->old_rate;
2389 ctx->pre_rate_change.new_rate = clk_data->new_rate;
2390 ctx->pre_rate_change.done = true;
2391 wake_up_interruptible(&ctx->pre_rate_change.wq);
2392 }
2393
2394 if (action & POST_RATE_CHANGE) {
2395 ctx->post_rate_change.old_rate = clk_data->old_rate;
2396 ctx->post_rate_change.new_rate = clk_data->new_rate;
2397 ctx->post_rate_change.done = true;
2398 wake_up_interruptible(&ctx->post_rate_change.wq);
2399 }
2400
2401 return 0;
2402}
2403
2404static int clk_mux_notifier_test_init(struct kunit *test)
2405{
2406 struct clk_mux_notifier_ctx *ctx;
2407 const char *top_parents[2] = { "parent-0", "parent-1" };
2408 int ret;
2409
2410 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2411 if (!ctx)
2412 return -ENOMEM;
2413 test->priv = ctx;
2414 ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2415 init_waitqueue_head(&ctx->pre_rate_change.wq);
2416 init_waitqueue_head(&ctx->post_rate_change.wq);
2417
2418 ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2419 &clk_dummy_rate_ops,
2420 0);
2421 ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2422 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2423 if (ret)
2424 return ret;
2425
2426 ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2427 &clk_dummy_rate_ops,
2428 0);
2429 ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2430 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2431 if (ret)
2432 return ret;
2433
2434 ctx->mux_ctx.current_parent = 0;
2435 ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2436 &clk_multiple_parents_mux_ops,
2437 0);
2438 ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2439 if (ret)
2440 return ret;
2441
2442 ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2443 ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2444 if (ret)
2445 return ret;
2446
2447 return 0;
2448}
2449
/* Unregister the notifier, drop the clk reference, then tear down the mux */
static void clk_mux_notifier_test_exit(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx = test->priv;
	struct clk *clk = ctx->clk;

	clk_notifier_unregister(clk, &ctx->clk_nb);
	clk_put(clk);

	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
2462
/*
 * Test that if we have a notifier registered on a mux, the core
 * will notify us when we switch to another parent, and with the proper
 * old and new rates.
 */
static void clk_mux_notifier_set_parent_test(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->mux_ctx.hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
	int ret;

	ret = clk_set_parent(clk, new_parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Notifications may be asynchronous to this thread; wait with a timeout */
	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
					       ctx->pre_rate_change.done,
					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
	KUNIT_ASSERT_GT(test, ret, 0);

	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);

	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
					       ctx->post_rate_change.done,
					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
	KUNIT_ASSERT_GT(test, ret, 0);

	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);

	clk_put(new_parent);
	clk_put(clk);
}
2498
/* Test cases for rate-change notifiers on a mux */
static struct kunit_case clk_mux_notifier_test_cases[] = {
	KUNIT_CASE(clk_mux_notifier_set_parent_test),
	{}
};
2503
/*
 * Test suite for a mux with multiple parents, and a notifier registered
 * on the mux.
 *
 * These tests exercise the behaviour of notifiers.
 */
static struct kunit_suite clk_mux_notifier_test_suite = {
	.name = "clk-mux-notifier",
	.init = clk_mux_notifier_test_init,
	.exit = clk_mux_notifier_test_exit,
	.test_cases = clk_mux_notifier_test_cases,
};
2516
/*
 * Register a two-parent mux (parent-0 at RATE_1 selected, parent-1 at
 * RATE_2) using ops that refuse to reparent on rate requests.
 */
static int
clk_mux_no_reparent_test_init(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx;
	const char *parents[2] = { "parent-0", "parent-1"};
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
							    &clk_dummy_rate_ops,
							    0);
	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
							    &clk_dummy_rate_ops,
							    0);
	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
	if (ret)
		return ret;

	ctx->current_parent = 0;
	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
					   &clk_multiple_parents_no_reparent_mux_ops,
					   0);
	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}
2555
/* Unregister the mux before its parents */
static void
clk_mux_no_reparent_test_exit(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parents_ctx[0].hw);
	clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
2565
/*
 * Test that if we have a mux that cannot change parent and we call
 * clk_round_rate() on it with a rate that should cause it to change
 * parent, it won't.
 */
static void clk_mux_no_reparent_round_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *other_parent, *parent;
	unsigned long other_parent_rate;
	unsigned long parent_rate;
	long rounded_rate;

	parent = clk_get_parent(clk);
	KUNIT_ASSERT_PTR_NE(test, parent, NULL);

	parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, parent_rate, 0);

	/* Grab the rate of the parent that is NOT currently selected */
	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));

	other_parent_rate = clk_get_rate(other_parent);
	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
	clk_put(other_parent);

	/* Asking for the other parent's rate must still round to ours */
	rounded_rate = clk_round_rate(clk, other_parent_rate);
	KUNIT_ASSERT_GT(test, rounded_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);

	clk_put(clk);
}
2601
/*
 * Test that if we have a mux that cannot change parent and we call
 * clk_set_rate() on it with a rate that should cause it to change
 * parent, it won't.
 */
static void clk_mux_no_reparent_set_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *other_parent, *parent;
	unsigned long other_parent_rate;
	unsigned long parent_rate;
	unsigned long rate;
	int ret;

	parent = clk_get_parent(clk);
	KUNIT_ASSERT_PTR_NE(test, parent, NULL);

	parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, parent_rate, 0);

	/* Grab the rate of the parent that is NOT currently selected */
	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));

	other_parent_rate = clk_get_rate(other_parent);
	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
	clk_put(other_parent);

	ret = clk_set_rate(clk, other_parent_rate);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* The mux must have stayed on the original parent's rate */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, parent_rate);

	clk_put(clk);
}
2641
/* Test cases for a mux that must never change parent on rate requests */
static struct kunit_case clk_mux_no_reparent_test_cases[] = {
	KUNIT_CASE(clk_mux_no_reparent_round_rate),
	KUNIT_CASE(clk_mux_no_reparent_set_rate),
	{}
};
2647
/*
 * Test suite for a clock mux that isn't allowed to change parent, using
 * the clk_hw_determine_rate_no_reparent() helper.
 *
 * These tests exercise that helper, and the proper selection of
 * rates and parents.
 */
static struct kunit_suite clk_mux_no_reparent_test_suite = {
	.name = "clk-mux-no-reparent",
	.init = clk_mux_no_reparent_test_init,
	.exit = clk_mux_no_reparent_test_exit,
	.test_cases = clk_mux_no_reparent_test_cases,
};
2661
/* Register every suite defined in this file with the KUnit framework */
kunit_test_suites(
	&clk_leaf_mux_set_rate_parent_test_suite,
	&clk_test_suite,
	&clk_multiple_parents_mux_test_suite,
	&clk_mux_no_reparent_test_suite,
	&clk_mux_notifier_test_suite,
	&clk_orphan_transparent_multiple_parent_mux_test_suite,
	&clk_orphan_transparent_single_parent_test_suite,
	&clk_orphan_two_level_root_last_test_suite,
	&clk_range_test_suite,
	&clk_range_maximize_test_suite,
	&clk_range_minimize_test_suite,
	&clk_single_parent_mux_test_suite,
	&clk_uncached_test_suite
);
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kunit test for clk rate management
4 */
5#include <linux/clk.h>
6#include <linux/clk-provider.h>
7
8/* Needed for clk_hw_get_clk() */
9#include "clk.h"
10
11#include <kunit/test.h>
12
13#define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
14#define DUMMY_CLOCK_RATE_1 (142 * 1000 * 1000)
15#define DUMMY_CLOCK_RATE_2 (242 * 1000 * 1000)
16
/* A software-only clock: its "hardware" rate is just the rate field */
struct clk_dummy_context {
	struct clk_hw hw;
	unsigned long rate;	/* current backing rate, read by recalc_rate */
};
21
/* Report the stored backing rate; parent_rate is ignored (no real parent) */
static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct clk_dummy_context *ctx =
		container_of(hw, struct clk_dummy_context, hw);

	return ctx->rate;
}
30
/* Accept any requested rate unchanged */
static int clk_dummy_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	/* Just return the same rate without modifying it */
	return 0;
}
37
/* Driver policy used by the "maximize" suites: always pick the range's ceiling */
static int clk_dummy_maximize_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	/*
	 * If there's a maximum set, always run the clock at the maximum
	 * allowed.
	 */
	if (req->max_rate < ULONG_MAX)
		req->rate = req->max_rate;

	return 0;
}
50
/* Driver policy used by the "minimize" suites: always pick the range's floor */
static int clk_dummy_minimize_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	/*
	 * If there's a minimum set, always run the clock at the minimum
	 * allowed.
	 */
	if (req->min_rate > 0)
		req->rate = req->min_rate;

	return 0;
}
63
/* "Program" the dummy hardware by storing the rate in the context */
static int clk_dummy_set_rate(struct clk_hw *hw,
			      unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_dummy_context *ctx =
		container_of(hw, struct clk_dummy_context, hw);

	ctx->rate = rate;
	return 0;
}
74
/* Single-parent clock: only validate the index, nothing to switch */
static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
{
	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	return 0;
}
82
/* Single-parent clock: the parent index is always 0 */
static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
{
	return 0;
}
87
/* Pass-through rate policy: accepts whatever rate is requested */
static const struct clk_ops clk_dummy_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_determine_rate,
	.set_rate = clk_dummy_set_rate,
};

/* Rate policy that always runs at the top of the allowed range */
static const struct clk_ops clk_dummy_maximize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_maximize_rate,
	.set_rate = clk_dummy_set_rate,
};

/* Rate policy that always runs at the bottom of the allowed range */
static const struct clk_ops clk_dummy_minimize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_minimize_rate,
	.set_rate = clk_dummy_set_rate,
};

/* Ops for a clock with exactly one, fixed parent */
static const struct clk_ops clk_dummy_single_parent_ops = {
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};
110
/* A two-parent mux (hw) plus its two dummy parent clocks */
struct clk_multiple_parent_ctx {
	struct clk_dummy_context parents_ctx[2];
	struct clk_hw hw;
	u8 current_parent;	/* index into parents_ctx currently selected */
};
116
/* Record the newly selected parent index after validating it */
static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_multiple_parent_ctx *ctx =
		container_of(hw, struct clk_multiple_parent_ctx, hw);

	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	ctx->current_parent = index;

	return 0;
}
129
/* Report the currently selected parent index */
static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
{
	struct clk_multiple_parent_ctx *ctx =
		container_of(hw, struct clk_multiple_parent_ctx, hw);

	return ctx->current_parent;
}
137
/* Mux ops that may reparent: pick whichever parent is closest to the request */
static const struct clk_ops clk_multiple_parents_mux_ops = {
	.get_parent = clk_multiple_parents_mux_get_parent,
	.set_parent = clk_multiple_parents_mux_set_parent,
	.determine_rate = __clk_mux_determine_rate_closest,
};
143
/*
 * Common init: register one parentless dummy clock at
 * DUMMY_CLOCK_INIT_RATE using the given ops, and stash its context in
 * test->priv.
 *
 * NOTE(review): init is stack-local; this relies on clk_hw_register()
 * consuming the init data during registration — confirm against the
 * clk core before reusing this pattern.
 */
static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
{
	struct clk_dummy_context *ctx;
	struct clk_init_data init = { };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->rate = DUMMY_CLOCK_INIT_RATE;
	test->priv = ctx;

	init.name = "test_dummy_rate";
	init.ops = ops;
	ctx->hw.init = &init;

	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}
166
/* Suite init: dummy clock with pass-through rate policy */
static int clk_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
}

/* Suite init: dummy clock that always maximizes within its range */
static int clk_maximize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
}

/* Suite init: dummy clock that always minimizes within its range */
static int clk_minimize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
}
181
/* Common exit: unregister the single dummy clock (ctx memory is kunit-managed) */
static void clk_test_exit(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
}
188
/*
 * Test that the actual rate matches what is returned by clk_get_rate()
 */
static void clk_test_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, ctx->rate);

	clk_put(clk);
}
205
/*
 * Test that, after a call to clk_set_rate(), the rate returned by
 * clk_get_rate() matches.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 */
static void clk_test_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(clk);
}
230
/*
 * Test that, after several calls to clk_set_rate(), the rate returned
 * by clk_get_rate() matches the last one.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 */
static void clk_test_set_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	/* The second call must win */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}
259
/*
 * Test that clk_round_rate and clk_set_rate are consistent and will
 * return the same frequency.
 */
static void clk_test_round_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rounded_rate, set_rate;

	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_GT(test, rounded_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	/* What we actually got must equal what rounding promised */
	set_rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, set_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);

	clk_put(clk);
}
285
/* Basic rate API test cases */
static struct kunit_case clk_test_cases[] = {
	KUNIT_CASE(clk_test_get_rate),
	KUNIT_CASE(clk_test_set_get_rate),
	KUNIT_CASE(clk_test_set_set_get_rate),
	KUNIT_CASE(clk_test_round_set_get_rate),
	{}
};
293
/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_test_suite = {
	.name = "clk-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_test_cases,
};
305
/* Register a parentless dummy clock with CLK_GET_RATE_NOCACHE set */
static int clk_uncached_test_init(struct kunit *test)
{
	struct clk_dummy_context *ctx;
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	ctx->rate = DUMMY_CLOCK_INIT_RATE;
	ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
					     &clk_dummy_rate_ops,
					     CLK_GET_RATE_NOCACHE);

	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}
327
/*
 * Test that for an uncached clock, the clock framework doesn't cache
 * the rate and clk_get_rate() will return the underlying clock rate
 * even if it changed.
 */
static void clk_test_uncached_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);

	/* We change the rate behind the clock framework's back */
	ctx->rate = DUMMY_CLOCK_RATE_1;
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(clk);
}
352
/*
 * Test that for an uncached clock, clk_set_rate_range() will work
 * properly if the rate hasn't changed.
 */
static void clk_test_uncached_set_range(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	/* The resulting rate must land inside the requested range */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}
377
/*
 * Test that for an uncached clock, clk_set_rate_range() will work
 * properly if the rate has changed in hardware.
 *
 * In this case, it means that if the rate wasn't initially in the range
 * we're trying to set, but got changed at some point into the range
 * without the kernel knowing about it, its rate shouldn't be affected.
 */
static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	/* We change the rate behind the clock framework's back */
	ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	/* The already-in-range hardware rate must be left alone */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

	clk_put(clk);
}
407
/* Test cases for a CLK_GET_RATE_NOCACHE clock */
static struct kunit_case clk_uncached_test_cases[] = {
	KUNIT_CASE(clk_test_uncached_get_rate),
	KUNIT_CASE(clk_test_uncached_set_range),
	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
	{}
};
414
/*
 * Test suite for a basic, uncached, rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_uncached_test_suite = {
	.name = "clk-uncached-test",
	.init = clk_uncached_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_uncached_test_cases,
};
426
/*
 * Register a two-parent mux (parent-0 at RATE_1 selected, parent-1 at
 * RATE_2) that propagates rate requests with CLK_SET_RATE_PARENT.
 */
static int
clk_multiple_parents_mux_test_init(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx;
	const char *parents[2] = { "parent-0", "parent-1"};
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
							    &clk_dummy_rate_ops,
							    0);
	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
							    &clk_dummy_rate_ops,
							    0);
	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
	if (ret)
		return ret;

	ctx->current_parent = 0;
	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
					   &clk_multiple_parents_mux_ops,
					   CLK_SET_RATE_PARENT);
	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}
465
/* Unregister the mux before its parents */
static void
clk_multiple_parents_mux_test_exit(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parents_ctx[0].hw);
	clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
475
/*
 * Test that for a clock with multiple parents, clk_get_parent()
 * actually returns the current one.
 */
static void
clk_test_multiple_parents_mux_get_parent(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	/* parent-0 is the parent selected at init time */
	struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);

	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));

	clk_put(parent);
	clk_put(clk);
}
493
494/*
 * Test that for a clock with multiple parents, clk_has_parent()
496 * actually reports all of them as parents.
497 */
498static void
499clk_test_multiple_parents_mux_has_parent(struct kunit *test)
500{
501 struct clk_multiple_parent_ctx *ctx = test->priv;
502 struct clk_hw *hw = &ctx->hw;
503 struct clk *clk = clk_hw_get_clk(hw, NULL);
504 struct clk *parent;
505
506 parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
507 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
508 clk_put(parent);
509
510 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
511 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
512 clk_put(parent);
513
514 clk_put(clk);
515}
516
517/*
518 * Test that for a clock with a multiple parents, if we set a range on
519 * that clock and the parent is changed, its rate after the reparenting
520 * is still within the range we asked for.
521 *
522 * FIXME: clk_set_parent() only does the reparenting but doesn't
523 * reevaluate whether the new clock rate is within its boundaries or
524 * not.
525 */
static void
clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent1, *parent2;
	unsigned long rate;
	int ret;

	/* Skipped: see the FIXME in the comment above. */
	kunit_skip(test, "This needs to be fixed in the core.");

	/* The mux must start on parent-0. */
	parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));

	parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);

	/* Give the two parents distinct rates. */
	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Constrain the mux around the current (parent-0) rate... */
	ret = clk_set_rate_range(clk,
				 DUMMY_CLOCK_RATE_1 - 1000,
				 DUMMY_CLOCK_RATE_1 + 1000);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* ...then switch to parent-1, whose rate is out of that range. */
	ret = clk_set_parent(clk, parent2);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* After reparenting, the rate must still honour the range. */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

	clk_put(parent2);
	clk_put(parent1);
	clk_put(clk);
}
568
static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
	{} /* sentinel */
};
575
576/*
577 * Test suite for a basic mux clock with two parents, with
578 * CLK_SET_RATE_PARENT on the child.
579 *
580 * These tests exercise the consumer API and check that the state of the
581 * child and parents are sane and consistent.
582 */
static struct kunit_suite
clk_multiple_parents_mux_test_suite = {
	.name = "clk-multiple-parents-mux-test",
	/* init/exit bracket every test case in the suite. */
	.init = clk_multiple_parents_mux_test_init,
	.exit = clk_multiple_parents_mux_test_exit,
	.test_cases = clk_multiple_parents_mux_test_cases,
};
590
591static int
592clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
593{
594 struct clk_multiple_parent_ctx *ctx;
595 const char *parents[2] = { "missing-parent", "proper-parent"};
596 int ret;
597
598 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
599 if (!ctx)
600 return -ENOMEM;
601 test->priv = ctx;
602
603 ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
604 &clk_dummy_rate_ops,
605 0);
606 ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
607 ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
608 if (ret)
609 return ret;
610
611 ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
612 &clk_multiple_parents_mux_ops,
613 CLK_SET_RATE_PARENT);
614 ret = clk_hw_register(NULL, &ctx->hw);
615 if (ret)
616 return ret;
617
618 return 0;
619}
620
621static void
622clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
623{
624 struct clk_multiple_parent_ctx *ctx = test->priv;
625
626 clk_hw_unregister(&ctx->hw);
627 clk_hw_unregister(&ctx->parents_ctx[1].hw);
628}
629
630/*
631 * Test that, for a mux whose current parent hasn't been registered yet and is
632 * thus orphan, clk_get_parent() will return NULL.
633 */
634static void
635clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
636{
637 struct clk_multiple_parent_ctx *ctx = test->priv;
638 struct clk_hw *hw = &ctx->hw;
639 struct clk *clk = clk_hw_get_clk(hw, NULL);
640
641 KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
642
643 clk_put(clk);
644}
645
646/*
647 * Test that, for a mux whose current parent hasn't been registered yet,
648 * calling clk_set_parent() to a valid parent will properly update the
649 * mux parent and its orphan status.
650 */
651static void
652clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
653{
654 struct clk_multiple_parent_ctx *ctx = test->priv;
655 struct clk_hw *hw = &ctx->hw;
656 struct clk *clk = clk_hw_get_clk(hw, NULL);
657 struct clk *parent, *new_parent;
658 int ret;
659
660 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
661 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
662
663 ret = clk_set_parent(clk, parent);
664 KUNIT_ASSERT_EQ(test, ret, 0);
665
666 new_parent = clk_get_parent(clk);
667 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
668 KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
669
670 clk_put(parent);
671 clk_put(clk);
672}
673
674/*
675 * Test that, for a mux that started orphan but got switched to a valid
676 * parent, calling clk_drop_range() on the mux won't affect the parent
677 * rate.
678 */
679static void
680clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
681{
682 struct clk_multiple_parent_ctx *ctx = test->priv;
683 struct clk_hw *hw = &ctx->hw;
684 struct clk *clk = clk_hw_get_clk(hw, NULL);
685 struct clk *parent;
686 unsigned long parent_rate, new_parent_rate;
687 int ret;
688
689 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
690 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
691
692 parent_rate = clk_get_rate(parent);
693 KUNIT_ASSERT_GT(test, parent_rate, 0);
694
695 ret = clk_set_parent(clk, parent);
696 KUNIT_ASSERT_EQ(test, ret, 0);
697
698 ret = clk_drop_range(clk);
699 KUNIT_ASSERT_EQ(test, ret, 0);
700
701 new_parent_rate = clk_get_rate(clk);
702 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
703 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
704
705 clk_put(parent);
706 clk_put(clk);
707}
708
709/*
710 * Test that, for a mux that started orphan but got switched to a valid
711 * parent, the rate of the mux and its new parent are consistent.
712 */
713static void
714clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
715{
716 struct clk_multiple_parent_ctx *ctx = test->priv;
717 struct clk_hw *hw = &ctx->hw;
718 struct clk *clk = clk_hw_get_clk(hw, NULL);
719 struct clk *parent;
720 unsigned long parent_rate, rate;
721 int ret;
722
723 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
724 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
725
726 parent_rate = clk_get_rate(parent);
727 KUNIT_ASSERT_GT(test, parent_rate, 0);
728
729 ret = clk_set_parent(clk, parent);
730 KUNIT_ASSERT_EQ(test, ret, 0);
731
732 rate = clk_get_rate(clk);
733 KUNIT_ASSERT_GT(test, rate, 0);
734 KUNIT_EXPECT_EQ(test, parent_rate, rate);
735
736 clk_put(parent);
737 clk_put(clk);
738}
739
740/*
741 * Test that, for a mux that started orphan but got switched to a valid
742 * parent, calling clk_put() on the mux won't affect the parent rate.
743 */
744static void
745clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
746{
747 struct clk_multiple_parent_ctx *ctx = test->priv;
748 struct clk *clk, *parent;
749 unsigned long parent_rate, new_parent_rate;
750 int ret;
751
752 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
753 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
754
755 clk = clk_hw_get_clk(&ctx->hw, NULL);
756 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
757
758 parent_rate = clk_get_rate(parent);
759 KUNIT_ASSERT_GT(test, parent_rate, 0);
760
761 ret = clk_set_parent(clk, parent);
762 KUNIT_ASSERT_EQ(test, ret, 0);
763
764 clk_put(clk);
765
766 new_parent_rate = clk_get_rate(parent);
767 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
768 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
769
770 clk_put(parent);
771}
772
773/*
774 * Test that, for a mux that started orphan but got switched to a valid
775 * parent, calling clk_set_rate_range() will affect the parent state if
776 * its rate is out of range.
777 */
778static void
779clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
780{
781 struct clk_multiple_parent_ctx *ctx = test->priv;
782 struct clk_hw *hw = &ctx->hw;
783 struct clk *clk = clk_hw_get_clk(hw, NULL);
784 struct clk *parent;
785 unsigned long rate;
786 int ret;
787
788 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
789 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
790
791 ret = clk_set_parent(clk, parent);
792 KUNIT_ASSERT_EQ(test, ret, 0);
793
794 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
795 KUNIT_ASSERT_EQ(test, ret, 0);
796
797 rate = clk_get_rate(clk);
798 KUNIT_ASSERT_GT(test, rate, 0);
799 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
800 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
801
802 clk_put(parent);
803 clk_put(clk);
804}
805
806/*
807 * Test that, for a mux that started orphan but got switched to a valid
808 * parent, calling clk_set_rate_range() won't affect the parent state if
809 * its rate is within range.
810 */
811static void
812clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
813{
814 struct clk_multiple_parent_ctx *ctx = test->priv;
815 struct clk_hw *hw = &ctx->hw;
816 struct clk *clk = clk_hw_get_clk(hw, NULL);
817 struct clk *parent;
818 unsigned long parent_rate, new_parent_rate;
819 int ret;
820
821 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
822 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
823
824 parent_rate = clk_get_rate(parent);
825 KUNIT_ASSERT_GT(test, parent_rate, 0);
826
827 ret = clk_set_parent(clk, parent);
828 KUNIT_ASSERT_EQ(test, ret, 0);
829
830 ret = clk_set_rate_range(clk,
831 DUMMY_CLOCK_INIT_RATE - 1000,
832 DUMMY_CLOCK_INIT_RATE + 1000);
833 KUNIT_ASSERT_EQ(test, ret, 0);
834
835 new_parent_rate = clk_get_rate(parent);
836 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
837 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
838
839 clk_put(parent);
840 clk_put(clk);
841}
842
843/*
844 * Test that, for a mux whose current parent hasn't been registered yet,
845 * calling clk_set_rate_range() will succeed, and will be taken into
846 * account when rounding a rate.
847 */
848static void
849clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
850{
851 struct clk_multiple_parent_ctx *ctx = test->priv;
852 struct clk_hw *hw = &ctx->hw;
853 struct clk *clk = clk_hw_get_clk(hw, NULL);
854 unsigned long rate;
855 int ret;
856
857 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
858 KUNIT_ASSERT_EQ(test, ret, 0);
859
860 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
861 KUNIT_ASSERT_GT(test, rate, 0);
862 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
863 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
864
865 clk_put(clk);
866}
867
868/*
 * Test that, for a mux that started orphan, was assigned a rate and
870 * then got switched to a valid parent, its rate is eventually within
871 * range.
872 *
873 * FIXME: Even though we update the rate as part of clk_set_parent(), we
874 * don't evaluate whether that new rate is within range and needs to be
875 * adjusted.
876 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	unsigned long rate;
	int ret;

	/* Skipped: see the FIXME in the comment above. */
	kunit_skip(test, "This needs to be fixed in the core.");

	/* Set the range while the mux is still orphan... */
	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);

	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

	/* ...then adopt a real parent... */
	ret = clk_set_parent(clk, parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* ...and the resulting rate should honour the range. */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(parent);
	clk_put(clk);
}
905
static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
	{} /* sentinel */
};
918
919/*
920 * Test suite for a basic mux clock with two parents. The default parent
921 * isn't registered, only the second parent is. By default, the clock
922 * will thus be orphan.
923 *
924 * These tests exercise the behaviour of the consumer API when dealing
925 * with an orphan clock, and how we deal with the transition to a valid
926 * parent.
927 */
static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
	.name = "clk-orphan-transparent-multiple-parent-mux-test",
	/* init/exit bracket every test case in the suite. */
	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
	.exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
};
934
struct clk_single_parent_ctx {
	/* Backing parent clock providing the rate. */
	struct clk_dummy_context parent_ctx;
	/* The single-parent child clock under test. */
	struct clk_hw hw;
};
939
940static int clk_single_parent_mux_test_init(struct kunit *test)
941{
942 struct clk_single_parent_ctx *ctx;
943 int ret;
944
945 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
946 if (!ctx)
947 return -ENOMEM;
948 test->priv = ctx;
949
950 ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
951 ctx->parent_ctx.hw.init =
952 CLK_HW_INIT_NO_PARENT("parent-clk",
953 &clk_dummy_rate_ops,
954 0);
955
956 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
957 if (ret)
958 return ret;
959
960 ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
961 &clk_dummy_single_parent_ops,
962 CLK_SET_RATE_PARENT);
963
964 ret = clk_hw_register(NULL, &ctx->hw);
965 if (ret)
966 return ret;
967
968 return 0;
969}
970
971static void
972clk_single_parent_mux_test_exit(struct kunit *test)
973{
974 struct clk_single_parent_ctx *ctx = test->priv;
975
976 clk_hw_unregister(&ctx->hw);
977 clk_hw_unregister(&ctx->parent_ctx.hw);
978}
979
980/*
981 * Test that for a clock with a single parent, clk_get_parent() actually
982 * returns the parent.
983 */
984static void
985clk_test_single_parent_mux_get_parent(struct kunit *test)
986{
987 struct clk_single_parent_ctx *ctx = test->priv;
988 struct clk_hw *hw = &ctx->hw;
989 struct clk *clk = clk_hw_get_clk(hw, NULL);
990 struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
991
992 KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
993
994 clk_put(parent);
995 clk_put(clk);
996}
997
998/*
999 * Test that for a clock with a single parent, clk_has_parent() actually
1000 * reports it as a parent.
1001 */
1002static void
1003clk_test_single_parent_mux_has_parent(struct kunit *test)
1004{
1005 struct clk_single_parent_ctx *ctx = test->priv;
1006 struct clk_hw *hw = &ctx->hw;
1007 struct clk *clk = clk_hw_get_clk(hw, NULL);
1008 struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1009
1010 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1011
1012 clk_put(parent);
1013 clk_put(clk);
1014}
1015
1016/*
1017 * Test that for a clock that can't modify its rate and with a single
1018 * parent, if we set disjoints range on the parent and then the child,
1019 * the second will return an error.
1020 *
1021 * FIXME: clk_set_rate_range() only considers the current clock when
1022 * evaluating whether ranges are disjoints and not the upstream clocks
1023 * ranges.
1024 */
static void
clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	int ret;

	/* Skipped: see the FIXME in the comment above. */
	kunit_skip(test, "This needs to be fixed in the core.");

	parent = clk_get_parent(clk);
	KUNIT_ASSERT_PTR_NE(test, parent, NULL);

	/* Parent constrained to [1000, 2000]... */
	ret = clk_set_rate_range(parent, 1000, 2000);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* ...so a disjoint [3000, 4000] on the child must be rejected. */
	ret = clk_set_rate_range(clk, 3000, 4000);
	KUNIT_EXPECT_LT(test, ret, 0);

	clk_put(clk);
}
1047
1048/*
1049 * Test that for a clock that can't modify its rate and with a single
1050 * parent, if we set disjoints range on the child and then the parent,
1051 * the second will return an error.
1052 *
1053 * FIXME: clk_set_rate_range() only considers the current clock when
1054 * evaluating whether ranges are disjoints and not the downstream clocks
1055 * ranges.
1056 */
static void
clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	int ret;

	/* Skipped: see the FIXME in the comment above. */
	kunit_skip(test, "This needs to be fixed in the core.");

	parent = clk_get_parent(clk);
	KUNIT_ASSERT_PTR_NE(test, parent, NULL);

	/* Child constrained to [1000, 2000]... */
	ret = clk_set_rate_range(clk, 1000, 2000);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* ...so a disjoint [3000, 4000] on the parent must be rejected. */
	ret = clk_set_rate_range(parent, 3000, 4000);
	KUNIT_EXPECT_LT(test, ret, 0);

	clk_put(clk);
}
1079
1080/*
1081 * Test that for a clock that can't modify its rate and with a single
1082 * parent, if we set a range on the parent and then call
1083 * clk_round_rate(), the boundaries of the parent are taken into
1084 * account.
1085 */
1086static void
1087clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1088{
1089 struct clk_single_parent_ctx *ctx = test->priv;
1090 struct clk_hw *hw = &ctx->hw;
1091 struct clk *clk = clk_hw_get_clk(hw, NULL);
1092 struct clk *parent;
1093 unsigned long rate;
1094 int ret;
1095
1096 parent = clk_get_parent(clk);
1097 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1098
1099 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1100 KUNIT_ASSERT_EQ(test, ret, 0);
1101
1102 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1103 KUNIT_ASSERT_GT(test, rate, 0);
1104 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1105 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1106
1107 clk_put(clk);
1108}
1109
1110/*
1111 * Test that for a clock that can't modify its rate and with a single
1112 * parent, if we set a range on the parent and a more restrictive one on
1113 * the child, and then call clk_round_rate(), the boundaries of the
1114 * two clocks are taken into account.
1115 */
1116static void
1117clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1118{
1119 struct clk_single_parent_ctx *ctx = test->priv;
1120 struct clk_hw *hw = &ctx->hw;
1121 struct clk *clk = clk_hw_get_clk(hw, NULL);
1122 struct clk *parent;
1123 unsigned long rate;
1124 int ret;
1125
1126 parent = clk_get_parent(clk);
1127 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1128
1129 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1130 KUNIT_ASSERT_EQ(test, ret, 0);
1131
1132 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1133 KUNIT_ASSERT_EQ(test, ret, 0);
1134
1135 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1136 KUNIT_ASSERT_GT(test, rate, 0);
1137 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1138 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1139
1140 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1141 KUNIT_ASSERT_GT(test, rate, 0);
1142 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1143 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1144
1145 clk_put(clk);
1146}
1147
1148/*
1149 * Test that for a clock that can't modify its rate and with a single
1150 * parent, if we set a range on the child and a more restrictive one on
1151 * the parent, and then call clk_round_rate(), the boundaries of the
1152 * two clocks are taken into account.
1153 */
1154static void
1155clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1156{
1157 struct clk_single_parent_ctx *ctx = test->priv;
1158 struct clk_hw *hw = &ctx->hw;
1159 struct clk *clk = clk_hw_get_clk(hw, NULL);
1160 struct clk *parent;
1161 unsigned long rate;
1162 int ret;
1163
1164 parent = clk_get_parent(clk);
1165 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1166
1167 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1168 KUNIT_ASSERT_EQ(test, ret, 0);
1169
1170 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1171 KUNIT_ASSERT_EQ(test, ret, 0);
1172
1173 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1174 KUNIT_ASSERT_GT(test, rate, 0);
1175 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1176 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1177
1178 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1179 KUNIT_ASSERT_GT(test, rate, 0);
1180 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1181 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1182
1183 clk_put(clk);
1184}
1185
static struct kunit_case clk_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
	{} /* sentinel */
};
1196
1197/*
1198 * Test suite for a basic mux clock with one parent, with
1199 * CLK_SET_RATE_PARENT on the child.
1200 *
1201 * These tests exercise the consumer API and check that the state of the
1202 * child and parent are sane and consistent.
1203 */
static struct kunit_suite
clk_single_parent_mux_test_suite = {
	.name = "clk-single-parent-mux-test",
	/* init/exit bracket every test case in the suite. */
	.init = clk_single_parent_mux_test_init,
	.exit = clk_single_parent_mux_test_exit,
	.test_cases = clk_single_parent_mux_test_cases,
};
1211
1212static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1213{
1214 struct clk_single_parent_ctx *ctx;
1215 struct clk_init_data init = { };
1216 const char * const parents[] = { "orphan_parent" };
1217 int ret;
1218
1219 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1220 if (!ctx)
1221 return -ENOMEM;
1222 test->priv = ctx;
1223
1224 init.name = "test_orphan_dummy_parent";
1225 init.ops = &clk_dummy_single_parent_ops;
1226 init.parent_names = parents;
1227 init.num_parents = ARRAY_SIZE(parents);
1228 init.flags = CLK_SET_RATE_PARENT;
1229 ctx->hw.init = &init;
1230
1231 ret = clk_hw_register(NULL, &ctx->hw);
1232 if (ret)
1233 return ret;
1234
1235 memset(&init, 0, sizeof(init));
1236 init.name = "orphan_parent";
1237 init.ops = &clk_dummy_rate_ops;
1238 ctx->parent_ctx.hw.init = &init;
1239 ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1240
1241 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1242 if (ret)
1243 return ret;
1244
1245 return 0;
1246}
1247
1248/*
1249 * Test that a mux-only clock, with an initial rate within a range,
1250 * will still have the same rate after the range has been enforced.
1251 *
1252 * See:
1253 * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1254 */
1255static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1256{
1257 struct clk_single_parent_ctx *ctx = test->priv;
1258 struct clk_hw *hw = &ctx->hw;
1259 struct clk *clk = clk_hw_get_clk(hw, NULL);
1260 unsigned long rate, new_rate;
1261
1262 rate = clk_get_rate(clk);
1263 KUNIT_ASSERT_GT(test, rate, 0);
1264
1265 KUNIT_ASSERT_EQ(test,
1266 clk_set_rate_range(clk,
1267 ctx->parent_ctx.rate - 1000,
1268 ctx->parent_ctx.rate + 1000),
1269 0);
1270
1271 new_rate = clk_get_rate(clk);
1272 KUNIT_ASSERT_GT(test, new_rate, 0);
1273 KUNIT_EXPECT_EQ(test, rate, new_rate);
1274
1275 clk_put(clk);
1276}
1277
static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
	{} /* sentinel */
};
1282
1283/*
1284 * Test suite for a basic mux clock with one parent. The parent is
1285 * registered after its child. The clock will thus be an orphan when
1286 * registered, but will no longer be when the tests run.
1287 *
1288 * These tests make sure a clock that used to be orphan has a sane,
1289 * consistent, behaviour.
1290 */
static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
	.name = "clk-orphan-transparent-single-parent-test",
	/* init/exit bracket every test case in the suite. */
	.init = clk_orphan_transparent_single_parent_mux_test_init,
	.exit = clk_single_parent_mux_test_exit,
	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};
1297
struct clk_single_parent_two_lvl_ctx {
	/* Root of the tree ("root-parent"), registered last. */
	struct clk_dummy_context parent_parent_ctx;
	/* Intermediate clock ("intermediate-parent"). */
	struct clk_dummy_context parent_ctx;
	/* Leaf clock under test ("test-clk"). */
	struct clk_hw hw;
};
1303
1304static int
1305clk_orphan_two_level_root_last_test_init(struct kunit *test)
1306{
1307 struct clk_single_parent_two_lvl_ctx *ctx;
1308 int ret;
1309
1310 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1311 if (!ctx)
1312 return -ENOMEM;
1313 test->priv = ctx;
1314
1315 ctx->parent_ctx.hw.init =
1316 CLK_HW_INIT("intermediate-parent",
1317 "root-parent",
1318 &clk_dummy_single_parent_ops,
1319 CLK_SET_RATE_PARENT);
1320 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1321 if (ret)
1322 return ret;
1323
1324 ctx->hw.init =
1325 CLK_HW_INIT("test-clk", "intermediate-parent",
1326 &clk_dummy_single_parent_ops,
1327 CLK_SET_RATE_PARENT);
1328 ret = clk_hw_register(NULL, &ctx->hw);
1329 if (ret)
1330 return ret;
1331
1332 ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1333 ctx->parent_parent_ctx.hw.init =
1334 CLK_HW_INIT_NO_PARENT("root-parent",
1335 &clk_dummy_rate_ops,
1336 0);
1337 ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1338 if (ret)
1339 return ret;
1340
1341 return 0;
1342}
1343
1344static void
1345clk_orphan_two_level_root_last_test_exit(struct kunit *test)
1346{
1347 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1348
1349 clk_hw_unregister(&ctx->hw);
1350 clk_hw_unregister(&ctx->parent_ctx.hw);
1351 clk_hw_unregister(&ctx->parent_parent_ctx.hw);
1352}
1353
1354/*
1355 * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1356 * will return the proper rate.
1357 */
1358static void
1359clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1360{
1361 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1362 struct clk_hw *hw = &ctx->hw;
1363 struct clk *clk = clk_hw_get_clk(hw, NULL);
1364 unsigned long rate;
1365
1366 rate = clk_get_rate(clk);
1367 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1368
1369 clk_put(clk);
1370}
1371
1372/*
1373 * Test that, for a clock whose parent used to be orphan,
1374 * clk_set_rate_range() won't affect its rate if it is already within
1375 * range.
1376 *
1377 * See (for Exynos 4210):
1378 * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1379 */
1380static void
1381clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1382{
1383 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1384 struct clk_hw *hw = &ctx->hw;
1385 struct clk *clk = clk_hw_get_clk(hw, NULL);
1386 unsigned long rate;
1387 int ret;
1388
1389 ret = clk_set_rate_range(clk,
1390 DUMMY_CLOCK_INIT_RATE - 1000,
1391 DUMMY_CLOCK_INIT_RATE + 1000);
1392 KUNIT_ASSERT_EQ(test, ret, 0);
1393
1394 rate = clk_get_rate(clk);
1395 KUNIT_ASSERT_GT(test, rate, 0);
1396 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1397
1398 clk_put(clk);
1399}
1400
static struct kunit_case
clk_orphan_two_level_root_last_test_cases[] = {
	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
	{} /* sentinel */
};
1407
1408/*
1409 * Test suite for a basic, transparent, clock with a parent that is also
1410 * such a clock. The parent's parent is registered last, while the
1411 * parent and its child are registered in that order. The intermediate
1412 * and leaf clocks will thus be orphan when registered, but the leaf
1413 * clock itself will always have its parent and will never be
1414 * reparented. Indeed, it's only orphan because its parent is.
1415 *
1416 * These tests exercise the behaviour of the consumer API when dealing
1417 * with an orphan clock, and how we deal with the transition to a valid
1418 * parent.
1419 */
static struct kunit_suite
clk_orphan_two_level_root_last_test_suite = {
	.name = "clk-orphan-two-level-root-last-test",
	/* init/exit bracket every test case in the suite. */
	.init = clk_orphan_two_level_root_last_test_init,
	.exit = clk_orphan_two_level_root_last_test_exit,
	.test_cases = clk_orphan_two_level_root_last_test_cases,
};
1427
1428/*
1429 * Test that clk_set_rate_range won't return an error for a valid range
1430 * and that it will make sure the rate of the clock is within the
1431 * boundaries.
1432 */
1433static void clk_range_test_set_range(struct kunit *test)
1434{
1435 struct clk_dummy_context *ctx = test->priv;
1436 struct clk_hw *hw = &ctx->hw;
1437 struct clk *clk = clk_hw_get_clk(hw, NULL);
1438 unsigned long rate;
1439
1440 KUNIT_ASSERT_EQ(test,
1441 clk_set_rate_range(clk,
1442 DUMMY_CLOCK_RATE_1,
1443 DUMMY_CLOCK_RATE_2),
1444 0);
1445
1446 rate = clk_get_rate(clk);
1447 KUNIT_ASSERT_GT(test, rate, 0);
1448 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1449 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1450
1451 clk_put(clk);
1452}
1453
1454/*
1455 * Test that calling clk_set_rate_range with a minimum rate higher than
1456 * the maximum rate returns an error.
1457 */
1458static void clk_range_test_set_range_invalid(struct kunit *test)
1459{
1460 struct clk_dummy_context *ctx = test->priv;
1461 struct clk_hw *hw = &ctx->hw;
1462 struct clk *clk = clk_hw_get_clk(hw, NULL);
1463
1464 KUNIT_EXPECT_LT(test,
1465 clk_set_rate_range(clk,
1466 DUMMY_CLOCK_RATE_1 + 1000,
1467 DUMMY_CLOCK_RATE_1),
1468 0);
1469
1470 clk_put(clk);
1471}
1472
1473/*
1474 * Test that users can't set multiple, disjoints, range that would be
1475 * impossible to meet.
1476 */
1477static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1478{
1479 struct clk_dummy_context *ctx = test->priv;
1480 struct clk_hw *hw = &ctx->hw;
1481 struct clk *user1, *user2;
1482
1483 user1 = clk_hw_get_clk(hw, NULL);
1484 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1485
1486 user2 = clk_hw_get_clk(hw, NULL);
1487 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1488
1489 KUNIT_ASSERT_EQ(test,
1490 clk_set_rate_range(user1, 1000, 2000),
1491 0);
1492
1493 KUNIT_EXPECT_LT(test,
1494 clk_set_rate_range(user2, 3000, 4000),
1495 0);
1496
1497 clk_put(user2);
1498 clk_put(user1);
1499}
1500
1501/*
1502 * Test that if our clock has some boundaries and we try to round a rate
1503 * lower than the minimum, the returned rate will be within range.
1504 */
1505static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1506{
1507 struct clk_dummy_context *ctx = test->priv;
1508 struct clk_hw *hw = &ctx->hw;
1509 struct clk *clk = clk_hw_get_clk(hw, NULL);
1510 long rate;
1511
1512 KUNIT_ASSERT_EQ(test,
1513 clk_set_rate_range(clk,
1514 DUMMY_CLOCK_RATE_1,
1515 DUMMY_CLOCK_RATE_2),
1516 0);
1517
1518 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1519 KUNIT_ASSERT_GT(test, rate, 0);
1520 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1521 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1522
1523 clk_put(clk);
1524}
1525
1526/*
 * Test that if our clock has some boundaries and we try to set a rate
 * lower than the minimum, the new rate will be within range.
1529 */
1530static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1531{
1532 struct clk_dummy_context *ctx = test->priv;
1533 struct clk_hw *hw = &ctx->hw;
1534 struct clk *clk = clk_hw_get_clk(hw, NULL);
1535 unsigned long rate;
1536
1537 KUNIT_ASSERT_EQ(test,
1538 clk_set_rate_range(clk,
1539 DUMMY_CLOCK_RATE_1,
1540 DUMMY_CLOCK_RATE_2),
1541 0);
1542
1543 KUNIT_ASSERT_EQ(test,
1544 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1545 0);
1546
1547 rate = clk_get_rate(clk);
1548 KUNIT_ASSERT_GT(test, rate, 0);
1549 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1550 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1551
1552 clk_put(clk);
1553}
1554
1555/*
1556 * Test that if our clock has some boundaries and we try to round and
1557 * set a rate lower than the minimum, the rate returned by
1558 * clk_round_rate() will be consistent with the new rate set by
1559 * clk_set_rate().
1560 */
1561static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1562{
1563 struct clk_dummy_context *ctx = test->priv;
1564 struct clk_hw *hw = &ctx->hw;
1565 struct clk *clk = clk_hw_get_clk(hw, NULL);
1566 long rounded;
1567
1568 KUNIT_ASSERT_EQ(test,
1569 clk_set_rate_range(clk,
1570 DUMMY_CLOCK_RATE_1,
1571 DUMMY_CLOCK_RATE_2),
1572 0);
1573
1574 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1575 KUNIT_ASSERT_GT(test, rounded, 0);
1576
1577 KUNIT_ASSERT_EQ(test,
1578 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1579 0);
1580
1581 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1582
1583 clk_put(clk);
1584}
1585
1586/*
1587 * Test that if our clock has some boundaries and we try to round a rate
1588 * higher than the maximum, the returned rate will be within range.
1589 */
1590static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1591{
1592 struct clk_dummy_context *ctx = test->priv;
1593 struct clk_hw *hw = &ctx->hw;
1594 struct clk *clk = clk_hw_get_clk(hw, NULL);
1595 long rate;
1596
1597 KUNIT_ASSERT_EQ(test,
1598 clk_set_rate_range(clk,
1599 DUMMY_CLOCK_RATE_1,
1600 DUMMY_CLOCK_RATE_2),
1601 0);
1602
1603 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1604 KUNIT_ASSERT_GT(test, rate, 0);
1605 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1606 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1607
1608 clk_put(clk);
1609}
1610
1611/*
1612 * Test that if our clock has some boundaries and we try to set a rate
1613 * higher than the maximum, the new rate will be within range.
1614 */
1615static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1616{
1617 struct clk_dummy_context *ctx = test->priv;
1618 struct clk_hw *hw = &ctx->hw;
1619 struct clk *clk = clk_hw_get_clk(hw, NULL);
1620 unsigned long rate;
1621
1622 KUNIT_ASSERT_EQ(test,
1623 clk_set_rate_range(clk,
1624 DUMMY_CLOCK_RATE_1,
1625 DUMMY_CLOCK_RATE_2),
1626 0);
1627
1628 KUNIT_ASSERT_EQ(test,
1629 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1630 0);
1631
1632 rate = clk_get_rate(clk);
1633 KUNIT_ASSERT_GT(test, rate, 0);
1634 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1635 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1636
1637 clk_put(clk);
1638}
1639
1640/*
1641 * Test that if our clock has some boundaries and we try to round and
1642 * set a rate higher than the maximum, the rate returned by
1643 * clk_round_rate() will be consistent with the new rate set by
1644 * clk_set_rate().
1645 */
1646static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1647{
1648 struct clk_dummy_context *ctx = test->priv;
1649 struct clk_hw *hw = &ctx->hw;
1650 struct clk *clk = clk_hw_get_clk(hw, NULL);
1651 long rounded;
1652
1653 KUNIT_ASSERT_EQ(test,
1654 clk_set_rate_range(clk,
1655 DUMMY_CLOCK_RATE_1,
1656 DUMMY_CLOCK_RATE_2),
1657 0);
1658
1659 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1660 KUNIT_ASSERT_GT(test, rounded, 0);
1661
1662 KUNIT_ASSERT_EQ(test,
1663 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1664 0);
1665
1666 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1667
1668 clk_put(clk);
1669}
1670
1671/*
1672 * Test that if our clock has a rate lower than the minimum set by a
1673 * call to clk_set_rate_range(), the rate will be raised to match the
1674 * new minimum.
1675 *
1676 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1677 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1678 */
1679static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1680{
1681 struct clk_dummy_context *ctx = test->priv;
1682 struct clk_hw *hw = &ctx->hw;
1683 struct clk *clk = clk_hw_get_clk(hw, NULL);
1684 unsigned long rate;
1685
1686 KUNIT_ASSERT_EQ(test,
1687 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1688 0);
1689
1690 KUNIT_ASSERT_EQ(test,
1691 clk_set_rate_range(clk,
1692 DUMMY_CLOCK_RATE_1,
1693 DUMMY_CLOCK_RATE_2),
1694 0);
1695
1696 rate = clk_get_rate(clk);
1697 KUNIT_ASSERT_GT(test, rate, 0);
1698 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1699
1700 clk_put(clk);
1701}
1702
1703/*
1704 * Test that if our clock has a rate higher than the maximum set by a
1705 * call to clk_set_rate_range(), the rate will be lowered to match the
1706 * new maximum.
1707 *
1708 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1709 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1710 */
1711static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1712{
1713 struct clk_dummy_context *ctx = test->priv;
1714 struct clk_hw *hw = &ctx->hw;
1715 struct clk *clk = clk_hw_get_clk(hw, NULL);
1716 unsigned long rate;
1717
1718 KUNIT_ASSERT_EQ(test,
1719 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1720 0);
1721
1722 KUNIT_ASSERT_EQ(test,
1723 clk_set_rate_range(clk,
1724 DUMMY_CLOCK_RATE_1,
1725 DUMMY_CLOCK_RATE_2),
1726 0);
1727
1728 rate = clk_get_rate(clk);
1729 KUNIT_ASSERT_GT(test, rate, 0);
1730 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1731
1732 clk_put(clk);
1733}
1734
/* Rate-range test cases run against the basic dummy rate clock. */
static struct kunit_case clk_range_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range),
	KUNIT_CASE(clk_range_test_set_range_invalid),
	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
	{}	/* sentinel */
};
1749
1750/*
1751 * Test suite for a basic rate clock, without any parent.
1752 *
1753 * These tests exercise the rate range API: clk_set_rate_range(),
1754 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1755 */
/* Suite descriptor; registered via kunit_test_suites() at the end of the file. */
static struct kunit_suite clk_range_test_suite = {
	.name = "clk-range-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_test_cases,
};
1762
1763/*
1764 * Test that if we have several subsequent calls to
1765 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1766 * needed each and every time.
1767 *
1768 * With clk_dummy_maximize_rate_ops, this means that the rate will
1769 * trail along the maximum as it evolves.
1770 */
1771static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1772{
1773 struct clk_dummy_context *ctx = test->priv;
1774 struct clk_hw *hw = &ctx->hw;
1775 struct clk *clk = clk_hw_get_clk(hw, NULL);
1776 unsigned long rate;
1777
1778 KUNIT_ASSERT_EQ(test,
1779 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1780 0);
1781
1782 KUNIT_ASSERT_EQ(test,
1783 clk_set_rate_range(clk,
1784 DUMMY_CLOCK_RATE_1,
1785 DUMMY_CLOCK_RATE_2),
1786 0);
1787
1788 rate = clk_get_rate(clk);
1789 KUNIT_ASSERT_GT(test, rate, 0);
1790 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1791
1792 KUNIT_ASSERT_EQ(test,
1793 clk_set_rate_range(clk,
1794 DUMMY_CLOCK_RATE_1,
1795 DUMMY_CLOCK_RATE_2 - 1000),
1796 0);
1797
1798 rate = clk_get_rate(clk);
1799 KUNIT_ASSERT_GT(test, rate, 0);
1800 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1801
1802 KUNIT_ASSERT_EQ(test,
1803 clk_set_rate_range(clk,
1804 DUMMY_CLOCK_RATE_1,
1805 DUMMY_CLOCK_RATE_2),
1806 0);
1807
1808 rate = clk_get_rate(clk);
1809 KUNIT_ASSERT_GT(test, rate, 0);
1810 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1811
1812 clk_put(clk);
1813}
1814
1815/*
1816 * Test that if we have several subsequent calls to
1817 * clk_set_rate_range(), across multiple users, the core will reevaluate
1818 * whether a new rate is needed each and every time.
1819 *
1820 * With clk_dummy_maximize_rate_ops, this means that the rate will
1821 * trail along the maximum as it evolves.
1822 */
1823static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1824{
1825 struct clk_dummy_context *ctx = test->priv;
1826 struct clk_hw *hw = &ctx->hw;
1827 struct clk *clk = clk_hw_get_clk(hw, NULL);
1828 struct clk *user1, *user2;
1829 unsigned long rate;
1830
1831 user1 = clk_hw_get_clk(hw, NULL);
1832 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1833
1834 user2 = clk_hw_get_clk(hw, NULL);
1835 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1836
1837 KUNIT_ASSERT_EQ(test,
1838 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1839 0);
1840
1841 KUNIT_ASSERT_EQ(test,
1842 clk_set_rate_range(user1,
1843 0,
1844 DUMMY_CLOCK_RATE_2),
1845 0);
1846
1847 rate = clk_get_rate(clk);
1848 KUNIT_ASSERT_GT(test, rate, 0);
1849 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1850
1851 KUNIT_ASSERT_EQ(test,
1852 clk_set_rate_range(user2,
1853 0,
1854 DUMMY_CLOCK_RATE_1),
1855 0);
1856
1857 rate = clk_get_rate(clk);
1858 KUNIT_ASSERT_GT(test, rate, 0);
1859 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1860
1861 KUNIT_ASSERT_EQ(test,
1862 clk_drop_range(user2),
1863 0);
1864
1865 rate = clk_get_rate(clk);
1866 KUNIT_ASSERT_GT(test, rate, 0);
1867 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1868
1869 clk_put(user2);
1870 clk_put(user1);
1871 clk_put(clk);
1872}
1873
1874/*
1875 * Test that if we have several subsequent calls to
1876 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
1878 *
1879 * With clk_dummy_maximize_rate_ops, this means that the rate will
1880 * trail along the maximum as it evolves.
1881 */
1882static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1883{
1884 struct clk_dummy_context *ctx = test->priv;
1885 struct clk_hw *hw = &ctx->hw;
1886 struct clk *clk = clk_hw_get_clk(hw, NULL);
1887 struct clk *user1, *user2;
1888 unsigned long rate;
1889
1890 user1 = clk_hw_get_clk(hw, NULL);
1891 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1892
1893 user2 = clk_hw_get_clk(hw, NULL);
1894 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1895
1896 KUNIT_ASSERT_EQ(test,
1897 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1898 0);
1899
1900 KUNIT_ASSERT_EQ(test,
1901 clk_set_rate_range(user1,
1902 0,
1903 DUMMY_CLOCK_RATE_2),
1904 0);
1905
1906 rate = clk_get_rate(clk);
1907 KUNIT_ASSERT_GT(test, rate, 0);
1908 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1909
1910 KUNIT_ASSERT_EQ(test,
1911 clk_set_rate_range(user2,
1912 0,
1913 DUMMY_CLOCK_RATE_1),
1914 0);
1915
1916 rate = clk_get_rate(clk);
1917 KUNIT_ASSERT_GT(test, rate, 0);
1918 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1919
1920 clk_put(user2);
1921
1922 rate = clk_get_rate(clk);
1923 KUNIT_ASSERT_GT(test, rate, 0);
1924 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1925
1926 clk_put(user1);
1927 clk_put(clk);
1928}
1929
/* Rate-range cases run against the always-maximize dummy driver. */
static struct kunit_case clk_range_maximize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
	{}	/* sentinel */
};
1936
1937/*
1938 * Test suite for a basic rate clock, without any parent.
1939 *
1940 * These tests exercise the rate range API: clk_set_rate_range(),
1941 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1942 * driver that will always try to run at the highest possible rate.
1943 */
/* Suite descriptor; registered via kunit_test_suites() at the end of the file. */
static struct kunit_suite clk_range_maximize_test_suite = {
	.name = "clk-range-maximize-test",
	.init = clk_maximize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_maximize_test_cases,
};
1950
1951/*
1952 * Test that if we have several subsequent calls to
1953 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1954 * needed each and every time.
1955 *
1956 * With clk_dummy_minimize_rate_ops, this means that the rate will
1957 * trail along the minimum as it evolves.
1958 */
1959static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1960{
1961 struct clk_dummy_context *ctx = test->priv;
1962 struct clk_hw *hw = &ctx->hw;
1963 struct clk *clk = clk_hw_get_clk(hw, NULL);
1964 unsigned long rate;
1965
1966 KUNIT_ASSERT_EQ(test,
1967 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1968 0);
1969
1970 KUNIT_ASSERT_EQ(test,
1971 clk_set_rate_range(clk,
1972 DUMMY_CLOCK_RATE_1,
1973 DUMMY_CLOCK_RATE_2),
1974 0);
1975
1976 rate = clk_get_rate(clk);
1977 KUNIT_ASSERT_GT(test, rate, 0);
1978 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1979
1980 KUNIT_ASSERT_EQ(test,
1981 clk_set_rate_range(clk,
1982 DUMMY_CLOCK_RATE_1 + 1000,
1983 DUMMY_CLOCK_RATE_2),
1984 0);
1985
1986 rate = clk_get_rate(clk);
1987 KUNIT_ASSERT_GT(test, rate, 0);
1988 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1989
1990 KUNIT_ASSERT_EQ(test,
1991 clk_set_rate_range(clk,
1992 DUMMY_CLOCK_RATE_1,
1993 DUMMY_CLOCK_RATE_2),
1994 0);
1995
1996 rate = clk_get_rate(clk);
1997 KUNIT_ASSERT_GT(test, rate, 0);
1998 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1999
2000 clk_put(clk);
2001}
2002
2003/*
2004 * Test that if we have several subsequent calls to
2005 * clk_set_rate_range(), across multiple users, the core will reevaluate
2006 * whether a new rate is needed each and every time.
2007 *
2008 * With clk_dummy_minimize_rate_ops, this means that the rate will
2009 * trail along the minimum as it evolves.
2010 */
2011static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2012{
2013 struct clk_dummy_context *ctx = test->priv;
2014 struct clk_hw *hw = &ctx->hw;
2015 struct clk *clk = clk_hw_get_clk(hw, NULL);
2016 struct clk *user1, *user2;
2017 unsigned long rate;
2018
2019 user1 = clk_hw_get_clk(hw, NULL);
2020 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2021
2022 user2 = clk_hw_get_clk(hw, NULL);
2023 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2024
2025 KUNIT_ASSERT_EQ(test,
2026 clk_set_rate_range(user1,
2027 DUMMY_CLOCK_RATE_1,
2028 ULONG_MAX),
2029 0);
2030
2031 rate = clk_get_rate(clk);
2032 KUNIT_ASSERT_GT(test, rate, 0);
2033 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2034
2035 KUNIT_ASSERT_EQ(test,
2036 clk_set_rate_range(user2,
2037 DUMMY_CLOCK_RATE_2,
2038 ULONG_MAX),
2039 0);
2040
2041 rate = clk_get_rate(clk);
2042 KUNIT_ASSERT_GT(test, rate, 0);
2043 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2044
2045 KUNIT_ASSERT_EQ(test,
2046 clk_drop_range(user2),
2047 0);
2048
2049 rate = clk_get_rate(clk);
2050 KUNIT_ASSERT_GT(test, rate, 0);
2051 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2052
2053 clk_put(user2);
2054 clk_put(user1);
2055 clk_put(clk);
2056}
2057
2058/*
2059 * Test that if we have several subsequent calls to
2060 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
2062 *
2063 * With clk_dummy_minimize_rate_ops, this means that the rate will
2064 * trail along the minimum as it evolves.
2065 */
2066static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
2067{
2068 struct clk_dummy_context *ctx = test->priv;
2069 struct clk_hw *hw = &ctx->hw;
2070 struct clk *clk = clk_hw_get_clk(hw, NULL);
2071 struct clk *user1, *user2;
2072 unsigned long rate;
2073
2074 user1 = clk_hw_get_clk(hw, NULL);
2075 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2076
2077 user2 = clk_hw_get_clk(hw, NULL);
2078 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2079
2080 KUNIT_ASSERT_EQ(test,
2081 clk_set_rate_range(user1,
2082 DUMMY_CLOCK_RATE_1,
2083 ULONG_MAX),
2084 0);
2085
2086 rate = clk_get_rate(clk);
2087 KUNIT_ASSERT_GT(test, rate, 0);
2088 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2089
2090 KUNIT_ASSERT_EQ(test,
2091 clk_set_rate_range(user2,
2092 DUMMY_CLOCK_RATE_2,
2093 ULONG_MAX),
2094 0);
2095
2096 rate = clk_get_rate(clk);
2097 KUNIT_ASSERT_GT(test, rate, 0);
2098 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2099
2100 clk_put(user2);
2101
2102 rate = clk_get_rate(clk);
2103 KUNIT_ASSERT_GT(test, rate, 0);
2104 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2105
2106 clk_put(user1);
2107 clk_put(clk);
2108}
2109
/* Rate-range cases run against the always-minimize dummy driver. */
static struct kunit_case clk_range_minimize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
	{}	/* sentinel */
};
2116
2117/*
2118 * Test suite for a basic rate clock, without any parent.
2119 *
2120 * These tests exercise the rate range API: clk_set_rate_range(),
2121 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
2122 * driver that will always try to run at the lowest possible rate.
2123 */
/* Suite descriptor; registered via kunit_test_suites() at the end of the file. */
static struct kunit_suite clk_range_minimize_test_suite = {
	.name = "clk-range-minimize-test",
	.init = clk_minimize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_minimize_test_cases,
};
2130
/*
 * Context for the leaf-mux tests: a two-parent mux (mux_ctx) with a
 * single-parent leaf clock (hw) registered on top of it.
 */
struct clk_leaf_mux_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk_hw hw;
};
2135
2136static int
2137clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
2138{
2139 struct clk_leaf_mux_ctx *ctx;
2140 const char *top_parents[2] = { "parent-0", "parent-1" };
2141 int ret;
2142
2143 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2144 if (!ctx)
2145 return -ENOMEM;
2146 test->priv = ctx;
2147
2148 ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2149 &clk_dummy_rate_ops,
2150 0);
2151 ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2152 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2153 if (ret)
2154 return ret;
2155
2156 ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2157 &clk_dummy_rate_ops,
2158 0);
2159 ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2160 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2161 if (ret)
2162 return ret;
2163
2164 ctx->mux_ctx.current_parent = 0;
2165 ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2166 &clk_multiple_parents_mux_ops,
2167 0);
2168 ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2169 if (ret)
2170 return ret;
2171
2172 ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw,
2173 &clk_dummy_single_parent_ops,
2174 CLK_SET_RATE_PARENT);
2175 ret = clk_hw_register(NULL, &ctx->hw);
2176 if (ret)
2177 return ret;
2178
2179 return 0;
2180}
2181
2182static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
2183{
2184 struct clk_leaf_mux_ctx *ctx = test->priv;
2185
2186 clk_hw_unregister(&ctx->hw);
2187 clk_hw_unregister(&ctx->mux_ctx.hw);
2188 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2189 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2190}
2191
2192/*
2193 * Test that, for a clock that will forward any rate request to its
2194 * parent, the rate request structure returned by __clk_determine_rate
2195 * is sane and will be what we expect.
2196 */
2197static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test)
2198{
2199 struct clk_leaf_mux_ctx *ctx = test->priv;
2200 struct clk_hw *hw = &ctx->hw;
2201 struct clk *clk = clk_hw_get_clk(hw, NULL);
2202 struct clk_rate_request req;
2203 unsigned long rate;
2204 int ret;
2205
2206 rate = clk_get_rate(clk);
2207 KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2208
2209 clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2);
2210
2211 ret = __clk_determine_rate(hw, &req);
2212 KUNIT_ASSERT_EQ(test, ret, 0);
2213
2214 KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
2215 KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
2216 KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
2217
2218 clk_put(clk);
2219}
2220
/* Single case covering rate-request forwarding through the leaf. */
static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
	KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate),
	{}	/* sentinel */
};
2225
2226/*
2227 * Test suite for a clock whose parent is a mux with multiple parents.
2228 * The leaf clock has CLK_SET_RATE_PARENT, and will forward rate
2229 * requests to the mux, which will then select which parent is the best
2230 * fit for a given rate.
2231 *
2232 * These tests exercise the behaviour of muxes, and the proper selection
2233 * of parents.
2234 */
/* Suite descriptor; registered via kunit_test_suites() at the end of the file. */
static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
	.name = "clk-leaf-mux-set-rate-parent",
	.init = clk_leaf_mux_set_rate_parent_test_init,
	.exit = clk_leaf_mux_set_rate_parent_test_exit,
	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
};
2241
/*
 * One notification record: the rates reported by the notifier callback,
 * plus a done flag and waitqueue so the test can wait for its delivery.
 */
struct clk_mux_notifier_rate_change {
	bool done;
	unsigned long old_rate;
	unsigned long new_rate;
	wait_queue_head_t wq;
};
2248
/*
 * Context shared between the notifier tests and their callback: the mux
 * under test, the consumer clk the notifier block is registered on, and
 * one record for each of the PRE and POST notifications.
 */
struct clk_mux_notifier_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk *clk;
	struct notifier_block clk_nb;
	struct clk_mux_notifier_rate_change pre_rate_change;
	struct clk_mux_notifier_rate_change post_rate_change;
};
2256
/* How long the tests wait for a notification to arrive, in milliseconds. */
#define NOTIFIER_TIMEOUT_MS 100
2258
2259static int clk_mux_notifier_callback(struct notifier_block *nb,
2260 unsigned long action, void *data)
2261{
2262 struct clk_notifier_data *clk_data = data;
2263 struct clk_mux_notifier_ctx *ctx = container_of(nb,
2264 struct clk_mux_notifier_ctx,
2265 clk_nb);
2266
2267 if (action & PRE_RATE_CHANGE) {
2268 ctx->pre_rate_change.old_rate = clk_data->old_rate;
2269 ctx->pre_rate_change.new_rate = clk_data->new_rate;
2270 ctx->pre_rate_change.done = true;
2271 wake_up_interruptible(&ctx->pre_rate_change.wq);
2272 }
2273
2274 if (action & POST_RATE_CHANGE) {
2275 ctx->post_rate_change.old_rate = clk_data->old_rate;
2276 ctx->post_rate_change.new_rate = clk_data->new_rate;
2277 ctx->post_rate_change.done = true;
2278 wake_up_interruptible(&ctx->post_rate_change.wq);
2279 }
2280
2281 return 0;
2282}
2283
2284static int clk_mux_notifier_test_init(struct kunit *test)
2285{
2286 struct clk_mux_notifier_ctx *ctx;
2287 const char *top_parents[2] = { "parent-0", "parent-1" };
2288 int ret;
2289
2290 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2291 if (!ctx)
2292 return -ENOMEM;
2293 test->priv = ctx;
2294 ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2295 init_waitqueue_head(&ctx->pre_rate_change.wq);
2296 init_waitqueue_head(&ctx->post_rate_change.wq);
2297
2298 ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2299 &clk_dummy_rate_ops,
2300 0);
2301 ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2302 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2303 if (ret)
2304 return ret;
2305
2306 ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2307 &clk_dummy_rate_ops,
2308 0);
2309 ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2310 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2311 if (ret)
2312 return ret;
2313
2314 ctx->mux_ctx.current_parent = 0;
2315 ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2316 &clk_multiple_parents_mux_ops,
2317 0);
2318 ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2319 if (ret)
2320 return ret;
2321
2322 ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2323 ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2324 if (ret)
2325 return ret;
2326
2327 return 0;
2328}
2329
2330static void clk_mux_notifier_test_exit(struct kunit *test)
2331{
2332 struct clk_mux_notifier_ctx *ctx = test->priv;
2333 struct clk *clk = ctx->clk;
2334
2335 clk_notifier_unregister(clk, &ctx->clk_nb);
2336 clk_put(clk);
2337
2338 clk_hw_unregister(&ctx->mux_ctx.hw);
2339 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2340 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2341}
2342
2343/*
 * Test that if we have a notifier registered on a mux, the core
2345 * will notify us when we switch to another parent, and with the proper
2346 * old and new rates.
2347 */
2348static void clk_mux_notifier_set_parent_test(struct kunit *test)
2349{
2350 struct clk_mux_notifier_ctx *ctx = test->priv;
2351 struct clk_hw *hw = &ctx->mux_ctx.hw;
2352 struct clk *clk = clk_hw_get_clk(hw, NULL);
2353 struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
2354 int ret;
2355
2356 ret = clk_set_parent(clk, new_parent);
2357 KUNIT_ASSERT_EQ(test, ret, 0);
2358
2359 ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
2360 ctx->pre_rate_change.done,
2361 msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2362 KUNIT_ASSERT_GT(test, ret, 0);
2363
2364 KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2365 KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2366
2367 ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
2368 ctx->post_rate_change.done,
2369 msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2370 KUNIT_ASSERT_GT(test, ret, 0);
2371
2372 KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2373 KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2374
2375 clk_put(new_parent);
2376 clk_put(clk);
2377}
2378
/* Single case covering PRE/POST notifications on a parent switch. */
static struct kunit_case clk_mux_notifier_test_cases[] = {
	KUNIT_CASE(clk_mux_notifier_set_parent_test),
	{}	/* sentinel */
};
2383
2384/*
2385 * Test suite for a mux with multiple parents, and a notifier registered
2386 * on the mux.
2387 *
2388 * These tests exercise the behaviour of notifiers.
2389 */
/* Suite descriptor; registered via kunit_test_suites() below. */
static struct kunit_suite clk_mux_notifier_test_suite = {
	.name = "clk-mux-notifier",
	.init = clk_mux_notifier_test_init,
	.exit = clk_mux_notifier_test_exit,
	.test_cases = clk_mux_notifier_test_cases,
};
2396
/* Register every suite defined in this file with the KUnit framework. */
kunit_test_suites(
	&clk_leaf_mux_set_rate_parent_test_suite,
	&clk_test_suite,
	&clk_multiple_parents_mux_test_suite,
	&clk_mux_notifier_test_suite,
	&clk_orphan_transparent_multiple_parent_mux_test_suite,
	&clk_orphan_transparent_single_parent_test_suite,
	&clk_orphan_two_level_root_last_test_suite,
	&clk_range_test_suite,
	&clk_range_maximize_test_suite,
	&clk_range_minimize_test_suite,
	&clk_single_parent_mux_test_suite,
	&clk_uncached_test_suite
);
MODULE_LICENSE("GPL v2");