1// SPDX-License-Identifier: GPL-2.0
2/*
3 * KUnit tests for the clk framework
4 */
5#include <linux/clk.h>
6#include <linux/clk-provider.h>
7#include <linux/clk/clk-conf.h>
8#include <linux/of.h>
9#include <linux/platform_device.h>
10
11/* Needed for clk_hw_get_clk() */
12#include "clk.h"
13
14#include <kunit/clk.h>
15#include <kunit/of.h>
16#include <kunit/platform_device.h>
17#include <kunit/test.h>
18
19#include "kunit_clk_assigned_rates.h"
20#include "clk_parent_data_test.h"
21
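/*
 * Deliberately empty ops, for clocks that don't need any provider
 * callbacks in a test.
 */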
22static const struct clk_ops empty_clk_ops = { };
23
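/* Arbitrary rates (42, 142 and 242 MHz) used by the dummy clocks below */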
24#define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
25#define DUMMY_CLOCK_RATE_1 (142 * 1000 * 1000)
26#define DUMMY_CLOCK_RATE_2 (242 * 1000 * 1000)
27
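/* A parentless dummy clock; ->rate stands in for the simulated hardware rate */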
28struct clk_dummy_context {
29 struct clk_hw hw;
30 unsigned long rate;
31};
32
33static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
34 unsigned long parent_rate)
35{
36 struct clk_dummy_context *ctx =
37 container_of(hw, struct clk_dummy_context, hw);
38
39 return ctx->rate;
40}
41
42static int clk_dummy_determine_rate(struct clk_hw *hw,
43 struct clk_rate_request *req)
44{
45 /* Just return the same rate without modifying it */
46 return 0;
47}
48
49static int clk_dummy_maximize_rate(struct clk_hw *hw,
50 struct clk_rate_request *req)
51{
52 /*
53 * If there's a maximum set, always run the clock at the maximum
54 * allowed.
55 */
56 if (req->max_rate < ULONG_MAX)
57 req->rate = req->max_rate;
58
59 return 0;
60}
61
62static int clk_dummy_minimize_rate(struct clk_hw *hw,
63 struct clk_rate_request *req)
64{
65 /*
66 * If there's a minimum set, always run the clock at the minimum
67 * allowed.
68 */
69 if (req->min_rate > 0)
70 req->rate = req->min_rate;
71
72 return 0;
73}
74
75static int clk_dummy_set_rate(struct clk_hw *hw,
76 unsigned long rate,
77 unsigned long parent_rate)
78{
79 struct clk_dummy_context *ctx =
80 container_of(hw, struct clk_dummy_context, hw);
81
82 ctx->rate = rate;
83 return 0;
84}
85
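/*
 * Parent callbacks for the single-parent dummy mux: any valid index is
 * accepted, and parent 0 is always reported as the current parent.
 */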
86static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
87{
88 if (index >= clk_hw_get_num_parents(hw))
89 return -EINVAL;
90
91 return 0;
92}
93
94static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
95{
96 return 0;
97}
98
99static const struct clk_ops clk_dummy_rate_ops = {
100 .recalc_rate = clk_dummy_recalc_rate,
101 .determine_rate = clk_dummy_determine_rate,
102 .set_rate = clk_dummy_set_rate,
103};
104
105static const struct clk_ops clk_dummy_maximize_rate_ops = {
106 .recalc_rate = clk_dummy_recalc_rate,
107 .determine_rate = clk_dummy_maximize_rate,
108 .set_rate = clk_dummy_set_rate,
109};
110
111static const struct clk_ops clk_dummy_minimize_rate_ops = {
112 .recalc_rate = clk_dummy_recalc_rate,
113 .determine_rate = clk_dummy_minimize_rate,
114 .set_rate = clk_dummy_set_rate,
115};
116
117static const struct clk_ops clk_dummy_single_parent_ops = {
118 /*
119 * FIXME: Even though we should probably be able to use
120 * __clk_mux_determine_rate() here, if we use it and call
121 * clk_round_rate() or clk_set_rate() with a rate lower than
122 * what all the parents can provide, it will return -EINVAL.
123 *
124 * This is due to the fact that it has the undocumented
125	 * behaviour to always pick up the closest rate lower than the
126	 * requested rate. If we get something higher, it thus considers
127 * that it's not acceptable and will return an error.
128 *
129 * It's somewhat inconsistent and creates a weird threshold
130 * between rates above the parent rate which would be rounded to
131 * what the parent can provide, but rates below will simply
132 * return an error.
133 */
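	/*
	 * Illustration (hypothetical numbers, not tied to any test below):
	 * with a single 100 MHz parent, clk_round_rate(clk, 150 MHz) rounds
	 * to 100 MHz with either helper, but clk_round_rate(clk, 50 MHz)
	 * would fail with -EINVAL under __clk_mux_determine_rate(), while
	 * the _closest variant used here still returns 100 MHz.
	 */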
134 .determine_rate = __clk_mux_determine_rate_closest,
135 .set_parent = clk_dummy_single_set_parent,
136 .get_parent = clk_dummy_single_get_parent,
137};
138
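/*
 * A mux with two dummy parents; current_parent mirrors the parent index
 * the simulated hardware is using.
 */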
139struct clk_multiple_parent_ctx {
140 struct clk_dummy_context parents_ctx[2];
141 struct clk_hw hw;
142 u8 current_parent;
143};
144
145static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
146{
147 struct clk_multiple_parent_ctx *ctx =
148 container_of(hw, struct clk_multiple_parent_ctx, hw);
149
150 if (index >= clk_hw_get_num_parents(hw))
151 return -EINVAL;
152
153 ctx->current_parent = index;
154
155 return 0;
156}
157
158static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
159{
160 struct clk_multiple_parent_ctx *ctx =
161 container_of(hw, struct clk_multiple_parent_ctx, hw);
162
163 return ctx->current_parent;
164}
165
166static const struct clk_ops clk_multiple_parents_mux_ops = {
167 .get_parent = clk_multiple_parents_mux_get_parent,
168 .set_parent = clk_multiple_parents_mux_set_parent,
169 .determine_rate = __clk_mux_determine_rate_closest,
170};
171
172static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
173 .determine_rate = clk_hw_determine_rate_no_reparent,
174 .get_parent = clk_multiple_parents_mux_get_parent,
175 .set_parent = clk_multiple_parents_mux_set_parent,
176};
177
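/*
 * Common init helper: allocate a dummy clock context (freed automatically
 * by KUnit), register it with the given ops and stash it in test->priv.
 */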
178static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
179{
180 struct clk_dummy_context *ctx;
181 struct clk_init_data init = { };
182 int ret;
183
184 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
185 if (!ctx)
186 return -ENOMEM;
187 ctx->rate = DUMMY_CLOCK_INIT_RATE;
188 test->priv = ctx;
189
190 init.name = "test_dummy_rate";
191 init.ops = ops;
192 ctx->hw.init = &init;
193
194 ret = clk_hw_register(NULL, &ctx->hw);
195 if (ret)
196 return ret;
197
198 return 0;
199}
200
201static int clk_test_init(struct kunit *test)
202{
203 return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
204}
205
206static int clk_maximize_test_init(struct kunit *test)
207{
208 return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
209}
210
211static int clk_minimize_test_init(struct kunit *test)
212{
213 return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
214}
215
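/* Shared exit helper: unregister the dummy clock stored in test->priv */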
216static void clk_test_exit(struct kunit *test)
217{
218 struct clk_dummy_context *ctx = test->priv;
219
220 clk_hw_unregister(&ctx->hw);
221}
222
223/*
224 * Test that the actual rate matches what is returned by clk_get_rate()
225 */
226static void clk_test_get_rate(struct kunit *test)
227{
228 struct clk_dummy_context *ctx = test->priv;
229 struct clk_hw *hw = &ctx->hw;
230 struct clk *clk = clk_hw_get_clk(hw, NULL);
231 unsigned long rate;
232
233 rate = clk_get_rate(clk);
234 KUNIT_ASSERT_GT(test, rate, 0);
235 KUNIT_EXPECT_EQ(test, rate, ctx->rate);
236
237 clk_put(clk);
238}
239
240/*
241 * Test that, after a call to clk_set_rate(), the rate returned by
242 * clk_get_rate() matches.
243 *
244 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
245 * modify the requested rate, which is our case in clk_dummy_rate_ops.
246 */
247static void clk_test_set_get_rate(struct kunit *test)
248{
249 struct clk_dummy_context *ctx = test->priv;
250 struct clk_hw *hw = &ctx->hw;
251 struct clk *clk = clk_hw_get_clk(hw, NULL);
252 unsigned long rate;
253
254 KUNIT_ASSERT_EQ(test,
255 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
256 0);
257
258 rate = clk_get_rate(clk);
259 KUNIT_ASSERT_GT(test, rate, 0);
260 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
261
262 clk_put(clk);
263}
264
265/*
266 * Test that, after several calls to clk_set_rate(), the rate returned
267 * by clk_get_rate() matches the last one.
268 *
269 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
270 * modify the requested rate, which is our case in clk_dummy_rate_ops.
271 */
272static void clk_test_set_set_get_rate(struct kunit *test)
273{
274 struct clk_dummy_context *ctx = test->priv;
275 struct clk_hw *hw = &ctx->hw;
276 struct clk *clk = clk_hw_get_clk(hw, NULL);
277 unsigned long rate;
278
279 KUNIT_ASSERT_EQ(test,
280 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
281 0);
282
283 KUNIT_ASSERT_EQ(test,
284 clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
285 0);
286
287 rate = clk_get_rate(clk);
288 KUNIT_ASSERT_GT(test, rate, 0);
289 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
290
291 clk_put(clk);
292}
293
294/*
295 * Test that clk_round_rate and clk_set_rate are consistent and will
296 * return the same frequency.
297 */
298static void clk_test_round_set_get_rate(struct kunit *test)
299{
300 struct clk_dummy_context *ctx = test->priv;
301 struct clk_hw *hw = &ctx->hw;
302 struct clk *clk = clk_hw_get_clk(hw, NULL);
303 unsigned long set_rate;
304 long rounded_rate;
305
306 rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
307 KUNIT_ASSERT_GT(test, rounded_rate, 0);
308 KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
309
310 KUNIT_ASSERT_EQ(test,
311 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
312 0);
313
314 set_rate = clk_get_rate(clk);
315 KUNIT_ASSERT_GT(test, set_rate, 0);
316 KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
317
318 clk_put(clk);
319}
320
321static struct kunit_case clk_test_cases[] = {
322 KUNIT_CASE(clk_test_get_rate),
323 KUNIT_CASE(clk_test_set_get_rate),
324 KUNIT_CASE(clk_test_set_set_get_rate),
325 KUNIT_CASE(clk_test_round_set_get_rate),
326 {}
327};
328
329/*
330 * Test suite for a basic rate clock, without any parent.
331 *
332 * These tests exercise the rate API with simple scenarios
333 */
334static struct kunit_suite clk_test_suite = {
335 .name = "clk-test",
336 .init = clk_test_init,
337 .exit = clk_test_exit,
338 .test_cases = clk_test_cases,
339};
340
341static int clk_uncached_test_init(struct kunit *test)
342{
343 struct clk_dummy_context *ctx;
344 int ret;
345
346 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
347 if (!ctx)
348 return -ENOMEM;
349 test->priv = ctx;
350
351 ctx->rate = DUMMY_CLOCK_INIT_RATE;
352 ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
353 &clk_dummy_rate_ops,
354 CLK_GET_RATE_NOCACHE);
355
356 ret = clk_hw_register(NULL, &ctx->hw);
357 if (ret)
358 return ret;
359
360 return 0;
361}
362
363/*
364 * Test that for an uncached clock, the clock framework doesn't cache
365 * the rate and clk_get_rate() will return the underlying clock rate
366 * even if it changed.
367 */
368static void clk_test_uncached_get_rate(struct kunit *test)
369{
370 struct clk_dummy_context *ctx = test->priv;
371 struct clk_hw *hw = &ctx->hw;
372 struct clk *clk = clk_hw_get_clk(hw, NULL);
373 unsigned long rate;
374
375 rate = clk_get_rate(clk);
376 KUNIT_ASSERT_GT(test, rate, 0);
377 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
378
379 /* We change the rate behind the clock framework's back */
380 ctx->rate = DUMMY_CLOCK_RATE_1;
381 rate = clk_get_rate(clk);
382 KUNIT_ASSERT_GT(test, rate, 0);
383 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
384
385 clk_put(clk);
386}
387
388/*
389 * Test that for an uncached clock, clk_set_rate_range() will work
390 * properly if the rate hasn't changed.
391 */
392static void clk_test_uncached_set_range(struct kunit *test)
393{
394 struct clk_dummy_context *ctx = test->priv;
395 struct clk_hw *hw = &ctx->hw;
396 struct clk *clk = clk_hw_get_clk(hw, NULL);
397 unsigned long rate;
398
399 KUNIT_ASSERT_EQ(test,
400 clk_set_rate_range(clk,
401 DUMMY_CLOCK_RATE_1,
402 DUMMY_CLOCK_RATE_2),
403 0);
404
405 rate = clk_get_rate(clk);
406 KUNIT_ASSERT_GT(test, rate, 0);
407 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
408 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
409
410 clk_put(clk);
411}
412
413/*
414 * Test that for an uncached clock, clk_set_rate_range() will work
415 * properly if the rate has changed in hardware.
416 *
417 * In this case, it means that if the rate wasn't initially in the range
418 * we're trying to set, but got changed at some point into the range
419 * without the kernel knowing about it, its rate shouldn't be affected.
420 */
421static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
422{
423 struct clk_dummy_context *ctx = test->priv;
424 struct clk_hw *hw = &ctx->hw;
425 struct clk *clk = clk_hw_get_clk(hw, NULL);
426 unsigned long rate;
427
428 /* We change the rate behind the clock framework's back */
429 ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
430 KUNIT_ASSERT_EQ(test,
431 clk_set_rate_range(clk,
432 DUMMY_CLOCK_RATE_1,
433 DUMMY_CLOCK_RATE_2),
434 0);
435
436 rate = clk_get_rate(clk);
437 KUNIT_ASSERT_GT(test, rate, 0);
438 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
439
440 clk_put(clk);
441}
442
443static struct kunit_case clk_uncached_test_cases[] = {
444 KUNIT_CASE(clk_test_uncached_get_rate),
445 KUNIT_CASE(clk_test_uncached_set_range),
446 KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
447 {}
448};
449
450/*
451 * Test suite for a basic, uncached, rate clock, without any parent.
452 *
453 * These tests exercise the rate API with simple scenarios
454 */
455static struct kunit_suite clk_uncached_test_suite = {
456 .name = "clk-uncached-test",
457 .init = clk_uncached_test_init,
458 .exit = clk_test_exit,
459 .test_cases = clk_uncached_test_cases,
460};
461
462static int
463clk_multiple_parents_mux_test_init(struct kunit *test)
464{
465 struct clk_multiple_parent_ctx *ctx;
466 const char *parents[2] = { "parent-0", "parent-1"};
467 int ret;
468
469 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
470 if (!ctx)
471 return -ENOMEM;
472 test->priv = ctx;
473
474 ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
475 &clk_dummy_rate_ops,
476 0);
477 ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
478 ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[0].hw);
479 if (ret)
480 return ret;
481
482 ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
483 &clk_dummy_rate_ops,
484 0);
485 ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
486 ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
487 if (ret)
488 return ret;
489
490 ctx->current_parent = 0;
491 ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
492 &clk_multiple_parents_mux_ops,
493 CLK_SET_RATE_PARENT);
494 ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
495 if (ret)
496 return ret;
497
498 return 0;
499}
500
501/*
502 * Test that for a clock with multiple parents, clk_get_parent()
503 * actually returns the current one.
504 */
505static void
506clk_test_multiple_parents_mux_get_parent(struct kunit *test)
507{
508 struct clk_multiple_parent_ctx *ctx = test->priv;
509 struct clk_hw *hw = &ctx->hw;
510 struct clk *clk = clk_hw_get_clk(hw, NULL);
511 struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
512
513 KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
514
515 clk_put(parent);
516 clk_put(clk);
517}
518
519/*
520 * Test that for a clock with multiple parents, clk_has_parent()
521 * actually reports all of them as parents.
522 */
523static void
524clk_test_multiple_parents_mux_has_parent(struct kunit *test)
525{
526 struct clk_multiple_parent_ctx *ctx = test->priv;
527 struct clk_hw *hw = &ctx->hw;
528 struct clk *clk = clk_hw_get_clk(hw, NULL);
529 struct clk *parent;
530
531 parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
532 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
533 clk_put(parent);
534
535 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
536 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
537 clk_put(parent);
538
539 clk_put(clk);
540}
541
542/*
543 * Test that for a clock with multiple parents, if we set a range on
544 * that clock and the parent is changed, its rate after the reparenting
545 * is still within the range we asked for.
546 *
547 * FIXME: clk_set_parent() only does the reparenting but doesn't
548 * reevaluate whether the new clock rate is within its boundaries or
549 * not.
550 */
551static void
552clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
553{
554 struct clk_multiple_parent_ctx *ctx = test->priv;
555 struct clk_hw *hw = &ctx->hw;
556 struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
557 struct clk *parent1, *parent2;
558 unsigned long rate;
559 int ret;
560
561 kunit_skip(test, "This needs to be fixed in the core.");
562
563 parent1 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[0].hw, NULL);
564 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
565 KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
566
567 parent2 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
568 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
569
570 ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
571 KUNIT_ASSERT_EQ(test, ret, 0);
572
573 ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
574 KUNIT_ASSERT_EQ(test, ret, 0);
575
576 ret = clk_set_rate_range(clk,
577 DUMMY_CLOCK_RATE_1 - 1000,
578 DUMMY_CLOCK_RATE_1 + 1000);
579 KUNIT_ASSERT_EQ(test, ret, 0);
580
581 ret = clk_set_parent(clk, parent2);
582 KUNIT_ASSERT_EQ(test, ret, 0);
583
584 rate = clk_get_rate(clk);
585 KUNIT_ASSERT_GT(test, rate, 0);
586 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
587 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
588}
589
590static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
591 KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
592 KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
593 KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
594 {}
595};
596
597/*
598 * Test suite for a basic mux clock with two parents, with
599 * CLK_SET_RATE_PARENT on the child.
600 *
601 * These tests exercise the consumer API and check that the state of the
602 * child and parents are sane and consistent.
603 */
604static struct kunit_suite
605clk_multiple_parents_mux_test_suite = {
606 .name = "clk-multiple-parents-mux-test",
607 .init = clk_multiple_parents_mux_test_init,
608 .test_cases = clk_multiple_parents_mux_test_cases,
609};
610
611static int
612clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
613{
614 struct clk_multiple_parent_ctx *ctx;
615 const char *parents[2] = { "missing-parent", "proper-parent"};
616 int ret;
617
618 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
619 if (!ctx)
620 return -ENOMEM;
621 test->priv = ctx;
622
623 ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
624 &clk_dummy_rate_ops,
625 0);
626 ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
627 ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
628 if (ret)
629 return ret;
630
631 ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
632 &clk_multiple_parents_mux_ops,
633 CLK_SET_RATE_PARENT);
634 ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
635 if (ret)
636 return ret;
637
638 return 0;
639}
640
641/*
642 * Test that, for a mux whose current parent hasn't been registered yet and is
643 * thus orphan, clk_get_parent() will return NULL.
644 */
645static void
646clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
647{
648 struct clk_multiple_parent_ctx *ctx = test->priv;
649 struct clk_hw *hw = &ctx->hw;
650 struct clk *clk = clk_hw_get_clk(hw, NULL);
651
652 KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
653
654 clk_put(clk);
655}
656
657/*
658 * Test that, for a mux whose current parent hasn't been registered yet,
659 * calling clk_set_parent() to a valid parent will properly update the
660 * mux parent and its orphan status.
661 */
662static void
663clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
664{
665 struct clk_multiple_parent_ctx *ctx = test->priv;
666 struct clk_hw *hw = &ctx->hw;
667 struct clk *clk = clk_hw_get_clk(hw, NULL);
668 struct clk *parent, *new_parent;
669 int ret;
670
671 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
672 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
673
674 ret = clk_set_parent(clk, parent);
675 KUNIT_ASSERT_EQ(test, ret, 0);
676
677 new_parent = clk_get_parent(clk);
678 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
679 KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
680
681 clk_put(parent);
682 clk_put(clk);
683}
684
685/*
686 * Test that, for a mux that started orphan but got switched to a valid
687 * parent, calling clk_drop_range() on the mux won't affect the parent
688 * rate.
689 */
690static void
691clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
692{
693 struct clk_multiple_parent_ctx *ctx = test->priv;
694 struct clk_hw *hw = &ctx->hw;
695 struct clk *clk = clk_hw_get_clk(hw, NULL);
696 struct clk *parent;
697 unsigned long parent_rate, new_parent_rate;
698 int ret;
699
700 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
701 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
702
703 parent_rate = clk_get_rate(parent);
704 KUNIT_ASSERT_GT(test, parent_rate, 0);
705
706 ret = clk_set_parent(clk, parent);
707 KUNIT_ASSERT_EQ(test, ret, 0);
708
709 ret = clk_drop_range(clk);
710 KUNIT_ASSERT_EQ(test, ret, 0);
711
712 new_parent_rate = clk_get_rate(clk);
713 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
714 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
715
716 clk_put(parent);
717 clk_put(clk);
718}
719
720/*
721 * Test that, for a mux that started orphan but got switched to a valid
722 * parent, the rate of the mux and its new parent are consistent.
723 */
724static void
725clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
726{
727 struct clk_multiple_parent_ctx *ctx = test->priv;
728 struct clk_hw *hw = &ctx->hw;
729 struct clk *clk = clk_hw_get_clk(hw, NULL);
730 struct clk *parent;
731 unsigned long parent_rate, rate;
732 int ret;
733
734 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
735 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
736
737 parent_rate = clk_get_rate(parent);
738 KUNIT_ASSERT_GT(test, parent_rate, 0);
739
740 ret = clk_set_parent(clk, parent);
741 KUNIT_ASSERT_EQ(test, ret, 0);
742
743 rate = clk_get_rate(clk);
744 KUNIT_ASSERT_GT(test, rate, 0);
745 KUNIT_EXPECT_EQ(test, parent_rate, rate);
746
747 clk_put(parent);
748 clk_put(clk);
749}
750
751/*
752 * Test that, for a mux that started orphan but got switched to a valid
753 * parent, calling clk_put() on the mux won't affect the parent rate.
754 */
755static void
756clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
757{
758 struct clk_multiple_parent_ctx *ctx = test->priv;
759 struct clk *clk, *parent;
760 unsigned long parent_rate, new_parent_rate;
761 int ret;
762
763 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
764 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
765
766 clk = clk_hw_get_clk(&ctx->hw, NULL);
767 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
768
769 parent_rate = clk_get_rate(parent);
770 KUNIT_ASSERT_GT(test, parent_rate, 0);
771
772 ret = clk_set_parent(clk, parent);
773 KUNIT_ASSERT_EQ(test, ret, 0);
774
775 clk_put(clk);
776
777 new_parent_rate = clk_get_rate(parent);
778 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
779 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
780
781 clk_put(parent);
782}
783
784/*
785 * Test that, for a mux that started orphan but got switched to a valid
786 * parent, calling clk_set_rate_range() will affect the parent state if
787 * its rate is out of range.
788 */
789static void
790clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
791{
792 struct clk_multiple_parent_ctx *ctx = test->priv;
793 struct clk_hw *hw = &ctx->hw;
794 struct clk *clk = clk_hw_get_clk(hw, NULL);
795 struct clk *parent;
796 unsigned long rate;
797 int ret;
798
799 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
800 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
801
802 ret = clk_set_parent(clk, parent);
803 KUNIT_ASSERT_EQ(test, ret, 0);
804
805 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
806 KUNIT_ASSERT_EQ(test, ret, 0);
807
808 rate = clk_get_rate(clk);
809 KUNIT_ASSERT_GT(test, rate, 0);
810 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
811 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
812
813 clk_put(parent);
814 clk_put(clk);
815}
816
817/*
818 * Test that, for a mux that started orphan but got switched to a valid
819 * parent, calling clk_set_rate_range() won't affect the parent state if
820 * its rate is within range.
821 */
822static void
823clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
824{
825 struct clk_multiple_parent_ctx *ctx = test->priv;
826 struct clk_hw *hw = &ctx->hw;
827 struct clk *clk = clk_hw_get_clk(hw, NULL);
828 struct clk *parent;
829 unsigned long parent_rate, new_parent_rate;
830 int ret;
831
832 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
833 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
834
835 parent_rate = clk_get_rate(parent);
836 KUNIT_ASSERT_GT(test, parent_rate, 0);
837
838 ret = clk_set_parent(clk, parent);
839 KUNIT_ASSERT_EQ(test, ret, 0);
840
841 ret = clk_set_rate_range(clk,
842 DUMMY_CLOCK_INIT_RATE - 1000,
843 DUMMY_CLOCK_INIT_RATE + 1000);
844 KUNIT_ASSERT_EQ(test, ret, 0);
845
846 new_parent_rate = clk_get_rate(parent);
847 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
848 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
849
850 clk_put(parent);
851 clk_put(clk);
852}
853
854/*
855 * Test that, for a mux whose current parent hasn't been registered yet,
856 * calling clk_set_rate_range() will succeed, and will be taken into
857 * account when rounding a rate.
858 */
859static void
860clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
861{
862 struct clk_multiple_parent_ctx *ctx = test->priv;
863 struct clk_hw *hw = &ctx->hw;
864 struct clk *clk = clk_hw_get_clk(hw, NULL);
865 long rate;
866 int ret;
867
868 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
869 KUNIT_ASSERT_EQ(test, ret, 0);
870
871 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
872 KUNIT_ASSERT_GT(test, rate, 0);
873 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
874 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
875
876 clk_put(clk);
877}
878
879/*
880 * Test that, for a mux that started orphan, was assigned a rate and
881 * then got switched to a valid parent, its rate is eventually within
882 * range.
883 *
884 * FIXME: Even though we update the rate as part of clk_set_parent(), we
885 * don't evaluate whether that new rate is within range and needs to be
886 * adjusted.
887 */
888static void
889clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
890{
891 struct clk_multiple_parent_ctx *ctx = test->priv;
892 struct clk_hw *hw = &ctx->hw;
893 struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
894 struct clk *parent;
895 unsigned long rate;
896 int ret;
897
898 kunit_skip(test, "This needs to be fixed in the core.");
899
900 clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
901
902 parent = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
903 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
904
905 ret = clk_set_parent(clk, parent);
906 KUNIT_ASSERT_EQ(test, ret, 0);
907
908 rate = clk_get_rate(clk);
909 KUNIT_ASSERT_GT(test, rate, 0);
910 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
911 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
912}
913
914static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
915 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
916 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
917 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
918 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
919 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
920 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
921 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
922 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
923 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
924 {}
925};
926
927/*
928 * Test suite for a basic mux clock with two parents. The default parent
929 * isn't registered, only the second parent is. By default, the clock
930 * will thus be orphan.
931 *
932 * These tests exercise the behaviour of the consumer API when dealing
933 * with an orphan clock, and how we deal with the transition to a valid
934 * parent.
935 */
936static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
937 .name = "clk-orphan-transparent-multiple-parent-mux-test",
938 .init = clk_orphan_transparent_multiple_parent_mux_test_init,
939 .test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
940};
941
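/* A mux-like clock with a single (dummy) parent */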
942struct clk_single_parent_ctx {
943 struct clk_dummy_context parent_ctx;
944 struct clk_hw hw;
945};
946
947static int clk_single_parent_mux_test_init(struct kunit *test)
948{
949 struct clk_single_parent_ctx *ctx;
950 int ret;
951
952 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
953 if (!ctx)
954 return -ENOMEM;
955 test->priv = ctx;
956
957 ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
958 ctx->parent_ctx.hw.init =
959 CLK_HW_INIT_NO_PARENT("parent-clk",
960 &clk_dummy_rate_ops,
961 0);
962
963 ret = clk_hw_register_kunit(test, NULL, &ctx->parent_ctx.hw);
964 if (ret)
965 return ret;
966
967 ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
968 &clk_dummy_single_parent_ops,
969 CLK_SET_RATE_PARENT);
970
971 ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
972 if (ret)
973 return ret;
974
975 return 0;
976}
977
978static void
979clk_single_parent_mux_test_exit(struct kunit *test)
980{
981 struct clk_single_parent_ctx *ctx = test->priv;
982
983 clk_hw_unregister(&ctx->hw);
984 clk_hw_unregister(&ctx->parent_ctx.hw);
985}
986
987/*
988 * Test that for a clock with a single parent, clk_get_parent() actually
989 * returns the parent.
990 */
991static void
992clk_test_single_parent_mux_get_parent(struct kunit *test)
993{
994 struct clk_single_parent_ctx *ctx = test->priv;
995 struct clk_hw *hw = &ctx->hw;
996 struct clk *clk = clk_hw_get_clk(hw, NULL);
997 struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
998
999 KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
1000
1001 clk_put(parent);
1002 clk_put(clk);
1003}
1004
1005/*
1006 * Test that for a clock with a single parent, clk_has_parent() actually
1007 * reports it as a parent.
1008 */
1009static void
1010clk_test_single_parent_mux_has_parent(struct kunit *test)
1011{
1012 struct clk_single_parent_ctx *ctx = test->priv;
1013 struct clk_hw *hw = &ctx->hw;
1014 struct clk *clk = clk_hw_get_clk(hw, NULL);
1015 struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1016
1017 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1018
1019 clk_put(parent);
1020 clk_put(clk);
1021}
1022
1023/*
1024 * Test that for a clock that can't modify its rate and with a single
1025 * parent, if we set disjoint ranges on the parent and then the child,
1026 * the second will return an error.
1027 *
1028 * FIXME: clk_set_rate_range() only considers the current clock when
1029 * evaluating whether ranges are disjoint and not the upstream clocks'
1030 * ranges.
1031 */
1032static void
1033clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1034{
1035 struct clk_single_parent_ctx *ctx = test->priv;
1036 struct clk_hw *hw = &ctx->hw;
1037 struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
1038 struct clk *parent;
1039 int ret;
1040
1041 kunit_skip(test, "This needs to be fixed in the core.");
1042
1043 parent = clk_get_parent(clk);
1044 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1045
1046 ret = clk_set_rate_range(parent, 1000, 2000);
1047 KUNIT_ASSERT_EQ(test, ret, 0);
1048
1049 ret = clk_set_rate_range(clk, 3000, 4000);
1050 KUNIT_EXPECT_LT(test, ret, 0);
1051}
1052
1053/*
1054 * Test that for a clock that can't modify its rate and with a single
1055 * parent, if we set disjoint ranges on the child and then the parent,
1056 * the second will return an error.
1057 *
1058 * FIXME: clk_set_rate_range() only considers the current clock when
1059 * evaluating whether ranges are disjoint and not the downstream clocks'
1060 * ranges.
1061 */
1062static void
1063clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1064{
1065 struct clk_single_parent_ctx *ctx = test->priv;
1066 struct clk_hw *hw = &ctx->hw;
1067 struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
1068 struct clk *parent;
1069 int ret;
1070
1071 kunit_skip(test, "This needs to be fixed in the core.");
1072
1073 parent = clk_get_parent(clk);
1074 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1075
1076 ret = clk_set_rate_range(clk, 1000, 2000);
1077 KUNIT_ASSERT_EQ(test, ret, 0);
1078
1079 ret = clk_set_rate_range(parent, 3000, 4000);
1080 KUNIT_EXPECT_LT(test, ret, 0);
1081}
1082
1083/*
1084 * Test that for a clock that can't modify its rate and with a single
1085 * parent, if we set a range on the parent and then call
1086 * clk_round_rate(), the boundaries of the parent are taken into
1087 * account.
1088 */
1089static void
1090clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1091{
1092 struct clk_single_parent_ctx *ctx = test->priv;
1093 struct clk_hw *hw = &ctx->hw;
1094 struct clk *clk = clk_hw_get_clk(hw, NULL);
1095 struct clk *parent;
1096 long rate;
1097 int ret;
1098
1099 parent = clk_get_parent(clk);
1100 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1101
1102 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1103 KUNIT_ASSERT_EQ(test, ret, 0);
1104
1105 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1106 KUNIT_ASSERT_GT(test, rate, 0);
1107 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1108 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1109
1110 clk_put(clk);
1111}
1112
1113/*
1114 * Test that for a clock that can't modify its rate and with a single
1115 * parent, if we set a range on the parent and a more restrictive one on
1116 * the child, and then call clk_round_rate(), the boundaries of the
1117 * two clocks are taken into account.
1118 */
1119static void
1120clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1121{
1122 struct clk_single_parent_ctx *ctx = test->priv;
1123 struct clk_hw *hw = &ctx->hw;
1124 struct clk *clk = clk_hw_get_clk(hw, NULL);
1125 struct clk *parent;
1126 long rate;
1127 int ret;
1128
1129 parent = clk_get_parent(clk);
1130 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1131
1132 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1133 KUNIT_ASSERT_EQ(test, ret, 0);
1134
1135 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1136 KUNIT_ASSERT_EQ(test, ret, 0);
1137
1138 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1139 KUNIT_ASSERT_GT(test, rate, 0);
1140 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1141 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1142
1143 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1144 KUNIT_ASSERT_GT(test, rate, 0);
1145 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1146 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1147
1148 clk_put(clk);
1149}
1150
1151/*
1152 * Test that for a clock that can't modify its rate and with a single
1153 * parent, if we set a range on the child and a more restrictive one on
1154 * the parent, and then call clk_round_rate(), the boundaries of the
1155 * two clocks are taken into account.
1156 */
1157static void
1158clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1159{
1160 struct clk_single_parent_ctx *ctx = test->priv;
1161 struct clk_hw *hw = &ctx->hw;
1162 struct clk *clk = clk_hw_get_clk(hw, NULL);
1163 struct clk *parent;
1164 long rate;
1165 int ret;
1166
1167 parent = clk_get_parent(clk);
1168 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1169
1170 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1171 KUNIT_ASSERT_EQ(test, ret, 0);
1172
1173 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1174 KUNIT_ASSERT_EQ(test, ret, 0);
1175
1176 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1177 KUNIT_ASSERT_GT(test, rate, 0);
1178 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1179 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1180
1181 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1182 KUNIT_ASSERT_GT(test, rate, 0);
1183 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1184 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1185
1186 clk_put(clk);
1187}
1188
1189static struct kunit_case clk_single_parent_mux_test_cases[] = {
1190 KUNIT_CASE(clk_test_single_parent_mux_get_parent),
1191 KUNIT_CASE(clk_test_single_parent_mux_has_parent),
1192 KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
1193 KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
1194 KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
1195 KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
1196 KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
1197 {}
1198};
1199
1200/*
1201 * Test suite for a basic mux clock with one parent, with
1202 * CLK_SET_RATE_PARENT on the child.
1203 *
1204 * These tests exercise the consumer API and check that the state of the
1205 * child and parent are sane and consistent.
1206 */
1207static struct kunit_suite
1208clk_single_parent_mux_test_suite = {
1209 .name = "clk-single-parent-mux-test",
1210 .init = clk_single_parent_mux_test_init,
1211 .test_cases = clk_single_parent_mux_test_cases,
1212};
1213
1214static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1215{
1216 struct clk_single_parent_ctx *ctx;
1217 struct clk_init_data init = { };
1218 const char * const parents[] = { "orphan_parent" };
1219 int ret;
1220
1221 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1222 if (!ctx)
1223 return -ENOMEM;
1224 test->priv = ctx;
1225
1226 init.name = "test_orphan_dummy_parent";
1227 init.ops = &clk_dummy_single_parent_ops;
1228 init.parent_names = parents;
1229 init.num_parents = ARRAY_SIZE(parents);
1230 init.flags = CLK_SET_RATE_PARENT;
1231 ctx->hw.init = &init;
1232
1233 ret = clk_hw_register(NULL, &ctx->hw);
1234 if (ret)
1235 return ret;
1236
1237 memset(&init, 0, sizeof(init));
1238 init.name = "orphan_parent";
1239 init.ops = &clk_dummy_rate_ops;
1240 ctx->parent_ctx.hw.init = &init;
1241 ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1242
1243 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1244 if (ret)
1245 return ret;
1246
1247 return 0;
1248}
1249
1250/*
1251 * Test that a mux-only clock, with an initial rate within a range,
1252 * will still have the same rate after the range has been enforced.
1253 *
1254 * See:
1255 * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1256 */
1257static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1258{
1259 struct clk_single_parent_ctx *ctx = test->priv;
1260 struct clk_hw *hw = &ctx->hw;
1261 struct clk *clk = clk_hw_get_clk(hw, NULL);
1262 unsigned long rate, new_rate;
1263
1264 rate = clk_get_rate(clk);
1265 KUNIT_ASSERT_GT(test, rate, 0);
1266
1267 KUNIT_ASSERT_EQ(test,
1268 clk_set_rate_range(clk,
1269 ctx->parent_ctx.rate - 1000,
1270 ctx->parent_ctx.rate + 1000),
1271 0);
1272
1273 new_rate = clk_get_rate(clk);
1274 KUNIT_ASSERT_GT(test, new_rate, 0);
1275 KUNIT_EXPECT_EQ(test, rate, new_rate);
1276
1277 clk_put(clk);
1278}
1279
1280static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
1281 KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
1282 {}
1283};
1284
1285/*
1286 * Test suite for a basic mux clock with one parent. The parent is
1287 * registered after its child. The clock will thus be an orphan when
1288 * registered, but will no longer be when the tests run.
1289 *
1290 * These tests make sure a clock that used to be orphan has a sane,
1291 * consistent, behaviour.
1292 */
1293static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
1294 .name = "clk-orphan-transparent-single-parent-test",
1295 .init = clk_orphan_transparent_single_parent_mux_test_init,
1296 .exit = clk_single_parent_mux_test_exit,
1297 .test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
1298};
1299
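/*
 * A two-level chain, root-parent -> intermediate-parent -> test clock,
 * used by the "root registered last" orphan tests below.
 */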
1300struct clk_single_parent_two_lvl_ctx {
1301 struct clk_dummy_context parent_parent_ctx;
1302 struct clk_dummy_context parent_ctx;
1303 struct clk_hw hw;
1304};
1305
1306static int
1307clk_orphan_two_level_root_last_test_init(struct kunit *test)
1308{
1309 struct clk_single_parent_two_lvl_ctx *ctx;
1310 int ret;
1311
1312 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1313 if (!ctx)
1314 return -ENOMEM;
1315 test->priv = ctx;
1316
1317 ctx->parent_ctx.hw.init =
1318 CLK_HW_INIT("intermediate-parent",
1319 "root-parent",
1320 &clk_dummy_single_parent_ops,
1321 CLK_SET_RATE_PARENT);
1322 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1323 if (ret)
1324 return ret;
1325
1326 ctx->hw.init =
1327 CLK_HW_INIT("test-clk", "intermediate-parent",
1328 &clk_dummy_single_parent_ops,
1329 CLK_SET_RATE_PARENT);
1330 ret = clk_hw_register(NULL, &ctx->hw);
1331 if (ret)
1332 return ret;
1333
1334 ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1335 ctx->parent_parent_ctx.hw.init =
1336 CLK_HW_INIT_NO_PARENT("root-parent",
1337 &clk_dummy_rate_ops,
1338 0);
1339 ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1340 if (ret)
1341 return ret;
1342
1343 return 0;
1344}
1345
1346static void
1347clk_orphan_two_level_root_last_test_exit(struct kunit *test)
1348{
1349 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1350
1351 clk_hw_unregister(&ctx->hw);
1352 clk_hw_unregister(&ctx->parent_ctx.hw);
1353 clk_hw_unregister(&ctx->parent_parent_ctx.hw);
1354}
1355
1356/*
1357 * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1358 * will return the proper rate.
1359 */
1360static void
1361clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1362{
1363 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1364 struct clk_hw *hw = &ctx->hw;
1365 struct clk *clk = clk_hw_get_clk(hw, NULL);
1366 unsigned long rate;
1367
1368 rate = clk_get_rate(clk);
1369 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1370
1371 clk_put(clk);
1372}
1373
1374/*
1375 * Test that, for a clock whose parent used to be orphan,
1376 * clk_set_rate_range() won't affect its rate if it is already within
1377 * range.
1378 *
1379 * See (for Exynos 4210):
1380 * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1381 */
1382static void
1383clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1384{
1385 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1386 struct clk_hw *hw = &ctx->hw;
1387 struct clk *clk = clk_hw_get_clk(hw, NULL);
1388 unsigned long rate;
1389 int ret;
1390
1391 ret = clk_set_rate_range(clk,
1392 DUMMY_CLOCK_INIT_RATE - 1000,
1393 DUMMY_CLOCK_INIT_RATE + 1000);
1394 KUNIT_ASSERT_EQ(test, ret, 0);
1395
1396 rate = clk_get_rate(clk);
1397 KUNIT_ASSERT_GT(test, rate, 0);
1398 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1399
1400 clk_put(clk);
1401}
1402
1403static struct kunit_case
1404clk_orphan_two_level_root_last_test_cases[] = {
1405 KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
1406 KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
1407 {}
1408};
1409
1410/*
1411 * Test suite for a basic, transparent clock with a parent that is also
1412 * such a clock. The parent's parent is registered last, while the
1413 * parent and its child are registered in that order. The intermediate
1414 * and leaf clocks will thus be orphan when registered, but the leaf
1415 * clock itself will always have its parent and will never be
1416 * reparented. Indeed, it's only orphan because its parent is.
1417 *
1418 * These tests exercise the behaviour of the consumer API when dealing
1419 * with an orphan clock, and how we deal with the transition to a valid
1420 * parent.
1421 */
1422static struct kunit_suite
1423clk_orphan_two_level_root_last_test_suite = {
1424 .name = "clk-orphan-two-level-root-last-test",
1425 .init = clk_orphan_two_level_root_last_test_init,
1426 .exit = clk_orphan_two_level_root_last_test_exit,
1427 .test_cases = clk_orphan_two_level_root_last_test_cases,
1428};
1429
1430/*
1431 * Test that clk_set_rate_range won't return an error for a valid range
1432 * and that it will make sure the rate of the clock is within the
1433 * boundaries.
1434 */
1435static void clk_range_test_set_range(struct kunit *test)
1436{
1437 struct clk_dummy_context *ctx = test->priv;
1438 struct clk_hw *hw = &ctx->hw;
1439 struct clk *clk = clk_hw_get_clk(hw, NULL);
1440 unsigned long rate;
1441
1442 KUNIT_ASSERT_EQ(test,
1443 clk_set_rate_range(clk,
1444 DUMMY_CLOCK_RATE_1,
1445 DUMMY_CLOCK_RATE_2),
1446 0);
1447
1448 rate = clk_get_rate(clk);
1449 KUNIT_ASSERT_GT(test, rate, 0);
1450 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1451 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1452
1453 clk_put(clk);
1454}
1455
1456/*
1457 * Test that calling clk_set_rate_range with a minimum rate higher than
1458 * the maximum rate returns an error.
1459 */
1460static void clk_range_test_set_range_invalid(struct kunit *test)
1461{
1462 struct clk_dummy_context *ctx = test->priv;
1463 struct clk_hw *hw = &ctx->hw;
1464 struct clk *clk = clk_hw_get_clk(hw, NULL);
1465
1466 KUNIT_EXPECT_LT(test,
1467 clk_set_rate_range(clk,
1468 DUMMY_CLOCK_RATE_1 + 1000,
1469 DUMMY_CLOCK_RATE_1),
1470 0);
1471
1472 clk_put(clk);
1473}
1474
1475/*
1476 * Test that users can't set multiple disjoint ranges that would be
1477 * impossible to meet.
1478 */
1479static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1480{
1481 struct clk_dummy_context *ctx = test->priv;
1482 struct clk_hw *hw = &ctx->hw;
1483 struct clk *user1, *user2;
1484
1485 user1 = clk_hw_get_clk(hw, NULL);
1486 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1487
1488 user2 = clk_hw_get_clk(hw, NULL);
1489 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1490
1491 KUNIT_ASSERT_EQ(test,
1492 clk_set_rate_range(user1, 1000, 2000),
1493 0);
1494
1495 KUNIT_EXPECT_LT(test,
1496 clk_set_rate_range(user2, 3000, 4000),
1497 0);
1498
1499 clk_put(user2);
1500 clk_put(user1);
1501}
1502
1503/*
1504 * Test that if our clock has some boundaries and we try to round a rate
1505 * lower than the minimum, the returned rate will be within range.
1506 */
1507static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1508{
1509 struct clk_dummy_context *ctx = test->priv;
1510 struct clk_hw *hw = &ctx->hw;
1511 struct clk *clk = clk_hw_get_clk(hw, NULL);
1512 long rate;
1513
1514 KUNIT_ASSERT_EQ(test,
1515 clk_set_rate_range(clk,
1516 DUMMY_CLOCK_RATE_1,
1517 DUMMY_CLOCK_RATE_2),
1518 0);
1519
1520 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1521 KUNIT_ASSERT_GT(test, rate, 0);
1522 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1523 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1524
1525 clk_put(clk);
1526}
1527
1528/*
1529 * Test that if our clock has some boundaries and we try to set a rate
1530 * lower than the minimum, the new rate will be within range.
1531 */
1532static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1533{
1534 struct clk_dummy_context *ctx = test->priv;
1535 struct clk_hw *hw = &ctx->hw;
1536 struct clk *clk = clk_hw_get_clk(hw, NULL);
1537 unsigned long rate;
1538
1539 KUNIT_ASSERT_EQ(test,
1540 clk_set_rate_range(clk,
1541 DUMMY_CLOCK_RATE_1,
1542 DUMMY_CLOCK_RATE_2),
1543 0);
1544
1545 KUNIT_ASSERT_EQ(test,
1546 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1547 0);
1548
1549 rate = clk_get_rate(clk);
1550 KUNIT_ASSERT_GT(test, rate, 0);
1551 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1552 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1553
1554 clk_put(clk);
1555}
1556
1557/*
1558 * Test that if our clock has some boundaries and we try to round and
1559 * set a rate lower than the minimum, the rate returned by
1560 * clk_round_rate() will be consistent with the new rate set by
1561 * clk_set_rate().
1562 */
1563static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1564{
1565 struct clk_dummy_context *ctx = test->priv;
1566 struct clk_hw *hw = &ctx->hw;
1567 struct clk *clk = clk_hw_get_clk(hw, NULL);
1568 long rounded;
1569
1570 KUNIT_ASSERT_EQ(test,
1571 clk_set_rate_range(clk,
1572 DUMMY_CLOCK_RATE_1,
1573 DUMMY_CLOCK_RATE_2),
1574 0);
1575
1576 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1577 KUNIT_ASSERT_GT(test, rounded, 0);
1578
1579 KUNIT_ASSERT_EQ(test,
1580 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1581 0);
1582
1583 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1584
1585 clk_put(clk);
1586}
1587
1588/*
1589 * Test that if our clock has some boundaries and we try to round a rate
1590 * higher than the maximum, the returned rate will be within range.
1591 */
1592static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1593{
1594 struct clk_dummy_context *ctx = test->priv;
1595 struct clk_hw *hw = &ctx->hw;
1596 struct clk *clk = clk_hw_get_clk(hw, NULL);
1597 long rate;
1598
1599 KUNIT_ASSERT_EQ(test,
1600 clk_set_rate_range(clk,
1601 DUMMY_CLOCK_RATE_1,
1602 DUMMY_CLOCK_RATE_2),
1603 0);
1604
1605 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1606 KUNIT_ASSERT_GT(test, rate, 0);
1607 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1608 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1609
1610 clk_put(clk);
1611}
1612
1613/*
1614 * Test that if our clock has some boundaries and we try to set a rate
1615 * higher than the maximum, the new rate will be within range.
1616 */
1617static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1618{
1619 struct clk_dummy_context *ctx = test->priv;
1620 struct clk_hw *hw = &ctx->hw;
1621 struct clk *clk = clk_hw_get_clk(hw, NULL);
1622 unsigned long rate;
1623
1624 KUNIT_ASSERT_EQ(test,
1625 clk_set_rate_range(clk,
1626 DUMMY_CLOCK_RATE_1,
1627 DUMMY_CLOCK_RATE_2),
1628 0);
1629
1630 KUNIT_ASSERT_EQ(test,
1631 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1632 0);
1633
1634 rate = clk_get_rate(clk);
1635 KUNIT_ASSERT_GT(test, rate, 0);
1636 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1637 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1638
1639 clk_put(clk);
1640}
1641
1642/*
1643 * Test that if our clock has some boundaries and we try to round and
1644 * set a rate higher than the maximum, the rate returned by
1645 * clk_round_rate() will be consistent with the new rate set by
1646 * clk_set_rate().
1647 */
1648static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1649{
1650 struct clk_dummy_context *ctx = test->priv;
1651 struct clk_hw *hw = &ctx->hw;
1652 struct clk *clk = clk_hw_get_clk(hw, NULL);
1653 long rounded;
1654
1655 KUNIT_ASSERT_EQ(test,
1656 clk_set_rate_range(clk,
1657 DUMMY_CLOCK_RATE_1,
1658 DUMMY_CLOCK_RATE_2),
1659 0);
1660
1661 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1662 KUNIT_ASSERT_GT(test, rounded, 0);
1663
1664 KUNIT_ASSERT_EQ(test,
1665 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1666 0);
1667
1668 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1669
1670 clk_put(clk);
1671}
1672
1673/*
1674 * Test that if our clock has a rate lower than the minimum set by a
1675 * call to clk_set_rate_range(), the rate will be raised to match the
1676 * new minimum.
1677 *
1678 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1679 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1680 */
1681static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1682{
1683 struct clk_dummy_context *ctx = test->priv;
1684 struct clk_hw *hw = &ctx->hw;
1685 struct clk *clk = clk_hw_get_clk(hw, NULL);
1686 unsigned long rate;
1687
1688 KUNIT_ASSERT_EQ(test,
1689 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1690 0);
1691
1692 KUNIT_ASSERT_EQ(test,
1693 clk_set_rate_range(clk,
1694 DUMMY_CLOCK_RATE_1,
1695 DUMMY_CLOCK_RATE_2),
1696 0);
1697
1698 rate = clk_get_rate(clk);
1699 KUNIT_ASSERT_GT(test, rate, 0);
1700 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1701
1702 clk_put(clk);
1703}
1704
1705/*
1706 * Test that if our clock has a rate higher than the maximum set by a
1707 * call to clk_set_rate_range(), the rate will be lowered to match the
1708 * new maximum.
1709 *
1710 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1711 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1712 */
1713static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1714{
1715 struct clk_dummy_context *ctx = test->priv;
1716 struct clk_hw *hw = &ctx->hw;
1717 struct clk *clk = clk_hw_get_clk(hw, NULL);
1718 unsigned long rate;
1719
1720 KUNIT_ASSERT_EQ(test,
1721 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1722 0);
1723
1724 KUNIT_ASSERT_EQ(test,
1725 clk_set_rate_range(clk,
1726 DUMMY_CLOCK_RATE_1,
1727 DUMMY_CLOCK_RATE_2),
1728 0);
1729
1730 rate = clk_get_rate(clk);
1731 KUNIT_ASSERT_GT(test, rate, 0);
1732 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1733
1734 clk_put(clk);
1735}
1736
1737static struct kunit_case clk_range_test_cases[] = {
1738 KUNIT_CASE(clk_range_test_set_range),
1739 KUNIT_CASE(clk_range_test_set_range_invalid),
1740 KUNIT_CASE(clk_range_test_multiple_disjoints_range),
1741 KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
1742 KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
1743 KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
1744 KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
1745 KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
1746 KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
1747 KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
1748 KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
1749 {}
1750};
1751
1752/*
1753 * Test suite for a basic rate clock, without any parent.
1754 *
1755 * These tests exercise the rate range API: clk_set_rate_range(),
1756 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1757 */
1758static struct kunit_suite clk_range_test_suite = {
1759 .name = "clk-range-test",
1760 .init = clk_test_init,
1761 .exit = clk_test_exit,
1762 .test_cases = clk_range_test_cases,
1763};
1764
1765/*
1766 * Test that if we have several subsequent calls to
1767 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1768 * needed each and every time.
1769 *
1770 * With clk_dummy_maximize_rate_ops, this means that the rate will
1771 * trail along the maximum as it evolves.
1772 */
1773static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1774{
1775 struct clk_dummy_context *ctx = test->priv;
1776 struct clk_hw *hw = &ctx->hw;
1777 struct clk *clk = clk_hw_get_clk(hw, NULL);
1778 unsigned long rate;
1779
1780 KUNIT_ASSERT_EQ(test,
1781 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1782 0);
1783
1784 KUNIT_ASSERT_EQ(test,
1785 clk_set_rate_range(clk,
1786 DUMMY_CLOCK_RATE_1,
1787 DUMMY_CLOCK_RATE_2),
1788 0);
1789
1790 rate = clk_get_rate(clk);
1791 KUNIT_ASSERT_GT(test, rate, 0);
1792 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1793
1794 KUNIT_ASSERT_EQ(test,
1795 clk_set_rate_range(clk,
1796 DUMMY_CLOCK_RATE_1,
1797 DUMMY_CLOCK_RATE_2 - 1000),
1798 0);
1799
1800 rate = clk_get_rate(clk);
1801 KUNIT_ASSERT_GT(test, rate, 0);
1802 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1803
1804 KUNIT_ASSERT_EQ(test,
1805 clk_set_rate_range(clk,
1806 DUMMY_CLOCK_RATE_1,
1807 DUMMY_CLOCK_RATE_2),
1808 0);
1809
1810 rate = clk_get_rate(clk);
1811 KUNIT_ASSERT_GT(test, rate, 0);
1812 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1813
1814 clk_put(clk);
1815}
1816
1817/*
1818 * Test that if we have several subsequent calls to
1819 * clk_set_rate_range(), across multiple users, the core will reevaluate
1820 * whether a new rate is needed each and every time.
1821 *
1822 * With clk_dummy_maximize_rate_ops, this means that the rate will
1823 * trail along the maximum as it evolves.
1824 */
1825static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1826{
1827 struct clk_dummy_context *ctx = test->priv;
1828 struct clk_hw *hw = &ctx->hw;
1829 struct clk *clk = clk_hw_get_clk(hw, NULL);
1830 struct clk *user1, *user2;
1831 unsigned long rate;
1832
1833 user1 = clk_hw_get_clk(hw, NULL);
1834 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1835
1836 user2 = clk_hw_get_clk(hw, NULL);
1837 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1838
1839 KUNIT_ASSERT_EQ(test,
1840 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1841 0);
1842
1843 KUNIT_ASSERT_EQ(test,
1844 clk_set_rate_range(user1,
1845 0,
1846 DUMMY_CLOCK_RATE_2),
1847 0);
1848
1849 rate = clk_get_rate(clk);
1850 KUNIT_ASSERT_GT(test, rate, 0);
1851 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1852
1853 KUNIT_ASSERT_EQ(test,
1854 clk_set_rate_range(user2,
1855 0,
1856 DUMMY_CLOCK_RATE_1),
1857 0);
1858
1859 rate = clk_get_rate(clk);
1860 KUNIT_ASSERT_GT(test, rate, 0);
1861 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1862
1863 KUNIT_ASSERT_EQ(test,
1864 clk_drop_range(user2),
1865 0);
1866
1867 rate = clk_get_rate(clk);
1868 KUNIT_ASSERT_GT(test, rate, 0);
1869 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1870
1871 clk_put(user2);
1872 clk_put(user1);
1873 clk_put(clk);
1874}
1875
1876/*
1877 * Test that if we have several subsequent calls to
1878 * clk_set_rate_range(), across multiple users, the core will reevaluate
1879 * whether a new rate is needed, including when a user drops its clock.
1880 *
1881 * With clk_dummy_maximize_rate_ops, this means that the rate will
1882 * trail along the maximum as it evolves.
1883 */
1884static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1885{
1886 struct clk_dummy_context *ctx = test->priv;
1887 struct clk_hw *hw = &ctx->hw;
1888 struct clk *clk = clk_hw_get_clk(hw, NULL);
1889 struct clk *user1, *user2;
1890 unsigned long rate;
1891
1892 user1 = clk_hw_get_clk(hw, NULL);
1893 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1894
1895 user2 = clk_hw_get_clk(hw, NULL);
1896 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1897
1898 KUNIT_ASSERT_EQ(test,
1899 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1900 0);
1901
1902 KUNIT_ASSERT_EQ(test,
1903 clk_set_rate_range(user1,
1904 0,
1905 DUMMY_CLOCK_RATE_2),
1906 0);
1907
1908 rate = clk_get_rate(clk);
1909 KUNIT_ASSERT_GT(test, rate, 0);
1910 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1911
1912 KUNIT_ASSERT_EQ(test,
1913 clk_set_rate_range(user2,
1914 0,
1915 DUMMY_CLOCK_RATE_1),
1916 0);
1917
1918 rate = clk_get_rate(clk);
1919 KUNIT_ASSERT_GT(test, rate, 0);
1920 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1921
1922 clk_put(user2);
1923
1924 rate = clk_get_rate(clk);
1925 KUNIT_ASSERT_GT(test, rate, 0);
1926 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1927
1928 clk_put(user1);
1929 clk_put(clk);
1930}
1931
1932static struct kunit_case clk_range_maximize_test_cases[] = {
1933 KUNIT_CASE(clk_range_test_set_range_rate_maximized),
1934 KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
1935 KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
1936 {}
1937};
1938
1939/*
1940 * Test suite for a basic rate clock, without any parent.
1941 *
1942 * These tests exercise the rate range API: clk_set_rate_range(),
1943 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1944 * driver that will always try to run at the highest possible rate.
1945 */
1946static struct kunit_suite clk_range_maximize_test_suite = {
1947 .name = "clk-range-maximize-test",
1948 .init = clk_maximize_test_init,
1949 .exit = clk_test_exit,
1950 .test_cases = clk_range_maximize_test_cases,
1951};
1952
1953/*
1954 * Test that if we have several subsequent calls to
1955 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1956 * needed each and every time.
1957 *
1958 * With clk_dummy_minimize_rate_ops, this means that the rate will
1959 * trail along the minimum as it evolves.
1960 */
1961static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1962{
1963 struct clk_dummy_context *ctx = test->priv;
1964 struct clk_hw *hw = &ctx->hw;
1965 struct clk *clk = clk_hw_get_clk(hw, NULL);
1966 unsigned long rate;
1967
1968 KUNIT_ASSERT_EQ(test,
1969 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1970 0);
1971
1972 KUNIT_ASSERT_EQ(test,
1973 clk_set_rate_range(clk,
1974 DUMMY_CLOCK_RATE_1,
1975 DUMMY_CLOCK_RATE_2),
1976 0);
1977
1978 rate = clk_get_rate(clk);
1979 KUNIT_ASSERT_GT(test, rate, 0);
1980 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1981
1982 KUNIT_ASSERT_EQ(test,
1983 clk_set_rate_range(clk,
1984 DUMMY_CLOCK_RATE_1 + 1000,
1985 DUMMY_CLOCK_RATE_2),
1986 0);
1987
1988 rate = clk_get_rate(clk);
1989 KUNIT_ASSERT_GT(test, rate, 0);
1990 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1991
1992 KUNIT_ASSERT_EQ(test,
1993 clk_set_rate_range(clk,
1994 DUMMY_CLOCK_RATE_1,
1995 DUMMY_CLOCK_RATE_2),
1996 0);
1997
1998 rate = clk_get_rate(clk);
1999 KUNIT_ASSERT_GT(test, rate, 0);
2000 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2001
2002 clk_put(clk);
2003}
2004
2005/*
2006 * Test that if we have several subsequent calls to
2007 * clk_set_rate_range(), across multiple users, the core will reevaluate
2008 * whether a new rate is needed each and every time.
2009 *
2010 * With clk_dummy_minimize_rate_ops, this means that the rate will
2011 * trail along the minimum as it evolves.
2012 */
2013static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2014{
2015 struct clk_dummy_context *ctx = test->priv;
2016 struct clk_hw *hw = &ctx->hw;
2017 struct clk *clk = clk_hw_get_clk(hw, NULL);
2018 struct clk *user1, *user2;
2019 unsigned long rate;
2020
2021 user1 = clk_hw_get_clk(hw, NULL);
2022 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2023
2024 user2 = clk_hw_get_clk(hw, NULL);
2025 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2026
2027 KUNIT_ASSERT_EQ(test,
2028 clk_set_rate_range(user1,
2029 DUMMY_CLOCK_RATE_1,
2030 ULONG_MAX),
2031 0);
2032
2033 rate = clk_get_rate(clk);
2034 KUNIT_ASSERT_GT(test, rate, 0);
2035 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2036
2037 KUNIT_ASSERT_EQ(test,
2038 clk_set_rate_range(user2,
2039 DUMMY_CLOCK_RATE_2,
2040 ULONG_MAX),
2041 0);
2042
2043 rate = clk_get_rate(clk);
2044 KUNIT_ASSERT_GT(test, rate, 0);
2045 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2046
2047 KUNIT_ASSERT_EQ(test,
2048 clk_drop_range(user2),
2049 0);
2050
2051 rate = clk_get_rate(clk);
2052 KUNIT_ASSERT_GT(test, rate, 0);
2053 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2054
2055 clk_put(user2);
2056 clk_put(user1);
2057 clk_put(clk);
2058}
2059
2060/*
2061 * Test that if we have several subsequent calls to
2062 * clk_set_rate_range(), across multiple users, the core will reevaluate
2063 * whether a new rate is needed, including when a user drops its clock.
2064 *
2065 * With clk_dummy_minimize_rate_ops, this means that the rate will
2066 * trail along the minimum as it evolves.
2067 */
2068static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
2069{
2070 struct clk_dummy_context *ctx = test->priv;
2071 struct clk_hw *hw = &ctx->hw;
2072 struct clk *clk = clk_hw_get_clk(hw, NULL);
2073 struct clk *user1, *user2;
2074 unsigned long rate;
2075
2076 user1 = clk_hw_get_clk(hw, NULL);
2077 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2078
2079 user2 = clk_hw_get_clk(hw, NULL);
2080 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2081
2082 KUNIT_ASSERT_EQ(test,
2083 clk_set_rate_range(user1,
2084 DUMMY_CLOCK_RATE_1,
2085 ULONG_MAX),
2086 0);
2087
2088 rate = clk_get_rate(clk);
2089 KUNIT_ASSERT_GT(test, rate, 0);
2090 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2091
2092 KUNIT_ASSERT_EQ(test,
2093 clk_set_rate_range(user2,
2094 DUMMY_CLOCK_RATE_2,
2095 ULONG_MAX),
2096 0);
2097
2098 rate = clk_get_rate(clk);
2099 KUNIT_ASSERT_GT(test, rate, 0);
2100 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2101
2102 clk_put(user2);
2103
2104 rate = clk_get_rate(clk);
2105 KUNIT_ASSERT_GT(test, rate, 0);
2106 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2107
2108 clk_put(user1);
2109 clk_put(clk);
2110}
2111
2112static struct kunit_case clk_range_minimize_test_cases[] = {
2113 KUNIT_CASE(clk_range_test_set_range_rate_minimized),
2114 KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
2115 KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
2116 {}
2117};
2118
2119/*
2120 * Test suite for a basic rate clock, without any parent.
2121 *
2122 * These tests exercise the rate range API: clk_set_rate_range(),
2123 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
2124 * driver that will always try to run at the lowest possible rate.
2125 */
2126static struct kunit_suite clk_range_minimize_test_suite = {
2127 .name = "clk-range-minimize-test",
2128 .init = clk_minimize_test_init,
2129 .exit = clk_test_exit,
2130 .test_cases = clk_range_minimize_test_cases,
2131};
2132
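/*
 * Context for the leaf mux tests: @hw is the leaf clk under test, @parent
 * is a pass-through clk sitting between the leaf and the mux in @mux_ctx.
 * @req points at the clk_rate_request forwarded to @determine_rate_func so
 * the test can inspect what the helper under test filled in.
 */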
2133struct clk_leaf_mux_ctx {
2134 struct clk_multiple_parent_ctx mux_ctx;
2135 struct clk_hw hw;
2136 struct clk_hw parent;
2137 struct clk_rate_request *req;
2138 int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2139};
2140
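/*
 * Forward the rate request to the parent using the clk_rate_request the
 * test provided through ctx->req, run the determine_rate helper selected
 * by the test on it, and report the resulting rate back to the core.
 */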
2141static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
2142{
2143 struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw);
2144 int ret;
2145 struct clk_rate_request *parent_req = ctx->req;
2146
2147 clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate);
2148 ret = ctx->determine_rate_func(req->best_parent_hw, parent_req);
2149 if (ret)
2150 return ret;
2151
2152 req->rate = parent_req->rate;
2153
2154 return 0;
2155}
2156
2157static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = {
2158 .determine_rate = clk_leaf_mux_determine_rate,
2159 .set_parent = clk_dummy_single_set_parent,
2160 .get_parent = clk_dummy_single_get_parent,
2161};
2162
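/*
 * Register the clk tree used by the leaf mux tests: two root parents
 * feeding a mux, a pass-through clk on top of the mux, and the leaf clk
 * under test, with CLK_SET_RATE_PARENT set on the pass-through and leaf.
 */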
2163static int
2164clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
2165{
2166 struct clk_leaf_mux_ctx *ctx;
2167 const char *top_parents[2] = { "parent-0", "parent-1" };
2168 int ret;
2169
2170 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2171 if (!ctx)
2172 return -ENOMEM;
2173 test->priv = ctx;
2174
2175 ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2176 &clk_dummy_rate_ops,
2177 0);
2178 ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2179 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2180 if (ret)
2181 return ret;
2182
2183 ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2184 &clk_dummy_rate_ops,
2185 0);
2186 ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2187 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2188 if (ret)
2189 return ret;
2190
2191 ctx->mux_ctx.current_parent = 0;
2192 ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2193 &clk_multiple_parents_mux_ops,
2194 0);
2195 ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2196 if (ret)
2197 return ret;
2198
2199 ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw,
2200 &empty_clk_ops, CLK_SET_RATE_PARENT);
2201 ret = clk_hw_register(NULL, &ctx->parent);
2202 if (ret)
2203 return ret;
2204
2205 ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent,
2206 &clk_leaf_mux_set_rate_parent_ops,
2207 CLK_SET_RATE_PARENT);
2208 ret = clk_hw_register(NULL, &ctx->hw);
2209 if (ret)
2210 return ret;
2211
2212 return 0;
2213}
2214
2215static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
2216{
2217 struct clk_leaf_mux_ctx *ctx = test->priv;
2218
2219 clk_hw_unregister(&ctx->hw);
2220 clk_hw_unregister(&ctx->parent);
2221 clk_hw_unregister(&ctx->mux_ctx.hw);
2222 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2223 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2224}
2225
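/* Parameterized test case: the determine_rate helper to exercise */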
2226struct clk_leaf_mux_set_rate_parent_determine_rate_test_case {
2227 const char *desc;
2228 int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2229};
2230
2231static void
2232clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc(
2233 const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc)
2234{
2235 strcpy(desc, t->desc);
2236}
2237
2238static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case
2239clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = {
2240 {
2241 /*
2242 * Test that __clk_determine_rate() on the parent that can't
2243 * change rate doesn't return a clk_rate_request structure with
2244 * the best_parent_hw pointer pointing to the parent.
2245 */
2246 .desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent",
2247 .determine_rate_func = __clk_determine_rate,
2248 },
2249 {
2250 /*
2251 * Test that __clk_mux_determine_rate() on the parent that
2252 * can't change rate doesn't return a clk_rate_request
2253 * structure with the best_parent_hw pointer pointing to
2254 * the parent.
2255 */
2256 .desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent",
2257 .determine_rate_func = __clk_mux_determine_rate,
2258 },
2259 {
2260 /*
2261 * Test that __clk_mux_determine_rate_closest() on the parent
2262 * that can't change rate doesn't return a clk_rate_request
2263 * structure with the best_parent_hw pointer pointing to
2264 * the parent.
2265 */
2266 .desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent",
2267 .determine_rate_func = __clk_mux_determine_rate_closest,
2268 },
2269 {
2270 /*
2271 * Test that clk_hw_determine_rate_no_reparent() on the parent
2272 * that can't change rate doesn't return a clk_rate_request
2273 * structure with the best_parent_hw pointer pointing to
2274 * the parent.
2275 */
2276 .desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent",
2277 .determine_rate_func = clk_hw_determine_rate_no_reparent,
2278 },
2279};
2280
2281KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2282 clk_leaf_mux_set_rate_parent_determine_rate_test_cases,
2283 clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc)
2284
2285/*
2286 * Test that when a clk that can't change rate itself calls a function like
2287 * __clk_determine_rate() on its parent it doesn't get back a clk_rate_request
2288 * structure that has the best_parent_hw pointer point to the clk_hw passed
2289 * into the determine rate function. See commit 262ca38f4b6e ("clk: Stop
2290 * forwarding clk_rate_requests to the parent") for more background.
2291 */
2292static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test)
2293{
2294 struct clk_leaf_mux_ctx *ctx = test->priv;
2295 struct clk_hw *hw = &ctx->hw;
2296 struct clk *clk = clk_hw_get_clk(hw, NULL);
2297 struct clk_rate_request req;
2298 unsigned long rate;
2299 const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param;
2300
2301 test_param = test->param_value;
2302 ctx->determine_rate_func = test_param->determine_rate_func;
2303
2304 ctx->req = &req;
2305 rate = clk_get_rate(clk);
2306 KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2307 KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2));
2308
2309 KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
2310 KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
2311 KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
2312
2313 clk_put(clk);
2314}
2315
2316static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
2317 KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2318 clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params),
2319 {}
2320};
2321
2322/*
2323 * Test suite for a clock whose parent is a pass-through clk whose parent is a
2324 * mux with multiple parents. The leaf and pass-through clocks have the
2325 * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which
2326 * will then select which parent is the best fit for a given rate.
2327 *
2328 * These tests exercise the behaviour of muxes, and the proper selection
2329 * of parents.
2330 */
2331static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
2332 .name = "clk-leaf-mux-set-rate-parent",
2333 .init = clk_leaf_mux_set_rate_parent_test_init,
2334 .exit = clk_leaf_mux_set_rate_parent_test_exit,
2335 .test_cases = clk_leaf_mux_set_rate_parent_test_cases,
2336};
2337
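/*
 * Rates recorded by the notifier callback for one event; @done flags that
 * the callback ran and @wq lets the test wait for it.
 */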
2338struct clk_mux_notifier_rate_change {
2339 bool done;
2340 unsigned long old_rate;
2341 unsigned long new_rate;
2342 wait_queue_head_t wq;
2343};
2344
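/*
 * Notifier test context: the mux under test, the consumer clk the notifier
 * is registered on, and one record per PRE/POST_RATE_CHANGE event.
 */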
2345struct clk_mux_notifier_ctx {
2346 struct clk_multiple_parent_ctx mux_ctx;
2347 struct clk *clk;
2348 struct notifier_block clk_nb;
2349 struct clk_mux_notifier_rate_change pre_rate_change;
2350 struct clk_mux_notifier_rate_change post_rate_change;
2351};
2352
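/* How long to wait for each notifier callback to fire, in milliseconds */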
2353#define NOTIFIER_TIMEOUT_MS 100
2354
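/*
 * Record the old and new rates for PRE_RATE_CHANGE and POST_RATE_CHANGE
 * events and wake up the test waiting on the matching wait queue.
 */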
2355static int clk_mux_notifier_callback(struct notifier_block *nb,
2356 unsigned long action, void *data)
2357{
2358 struct clk_notifier_data *clk_data = data;
2359 struct clk_mux_notifier_ctx *ctx = container_of(nb,
2360 struct clk_mux_notifier_ctx,
2361 clk_nb);
2362
2363 if (action & PRE_RATE_CHANGE) {
2364 ctx->pre_rate_change.old_rate = clk_data->old_rate;
2365 ctx->pre_rate_change.new_rate = clk_data->new_rate;
2366 ctx->pre_rate_change.done = true;
2367 wake_up_interruptible(&ctx->pre_rate_change.wq);
2368 }
2369
2370 if (action & POST_RATE_CHANGE) {
2371 ctx->post_rate_change.old_rate = clk_data->old_rate;
2372 ctx->post_rate_change.new_rate = clk_data->new_rate;
2373 ctx->post_rate_change.done = true;
2374 wake_up_interruptible(&ctx->post_rate_change.wq);
2375 }
2376
2377 return 0;
2378}
2379
2380static int clk_mux_notifier_test_init(struct kunit *test)
2381{
2382 struct clk_mux_notifier_ctx *ctx;
2383 const char *top_parents[2] = { "parent-0", "parent-1" };
2384 int ret;
2385
2386 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2387 if (!ctx)
2388 return -ENOMEM;
2389 test->priv = ctx;
2390 ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2391 init_waitqueue_head(&ctx->pre_rate_change.wq);
2392 init_waitqueue_head(&ctx->post_rate_change.wq);
2393
2394 ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2395 &clk_dummy_rate_ops,
2396 0);
2397 ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2398 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2399 if (ret)
2400 return ret;
2401
2402 ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2403 &clk_dummy_rate_ops,
2404 0);
2405 ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2406 ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2407 if (ret)
2408 return ret;
2409
2410 ctx->mux_ctx.current_parent = 0;
2411 ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2412 &clk_multiple_parents_mux_ops,
2413 0);
2414 ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2415 if (ret)
2416 return ret;
2417
2418 ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2419 ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2420 if (ret)
2421 return ret;
2422
2423 return 0;
2424}
2425
2426static void clk_mux_notifier_test_exit(struct kunit *test)
2427{
2428 struct clk_mux_notifier_ctx *ctx = test->priv;
2429 struct clk *clk = ctx->clk;
2430
2431 clk_notifier_unregister(clk, &ctx->clk_nb);
2432 clk_put(clk);
2433
2434 clk_hw_unregister(&ctx->mux_ctx.hw);
2435 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2436 clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2437}
2438
2439/*
2440 * Test that if we have a notifier registered on a mux, the core
2441 * will notify us when we switch to another parent, and with the proper
2442 * old and new rates.
2443 */
2444static void clk_mux_notifier_set_parent_test(struct kunit *test)
2445{
2446 struct clk_mux_notifier_ctx *ctx = test->priv;
2447 struct clk_hw *hw = &ctx->mux_ctx.hw;
2448 struct clk *clk = clk_hw_get_clk(hw, NULL);
2449 struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
2450 int ret;
2451
2452 ret = clk_set_parent(clk, new_parent);
2453 KUNIT_ASSERT_EQ(test, ret, 0);
2454
2455 ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
2456 ctx->pre_rate_change.done,
2457 msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2458 KUNIT_ASSERT_GT(test, ret, 0);
2459
2460 KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2461 KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2462
2463 ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
2464 ctx->post_rate_change.done,
2465 msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2466 KUNIT_ASSERT_GT(test, ret, 0);
2467
2468 KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2469 KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2470
2471 clk_put(new_parent);
2472 clk_put(clk);
2473}
2474
2475static struct kunit_case clk_mux_notifier_test_cases[] = {
2476 KUNIT_CASE(clk_mux_notifier_set_parent_test),
2477 {}
2478};
2479
2480/*
2481 * Test suite for a mux with multiple parents, and a notifier registered
2482 * on the mux.
2483 *
2484 * These tests exercise the behaviour of notifiers.
2485 */
2486static struct kunit_suite clk_mux_notifier_test_suite = {
2487 .name = "clk-mux-notifier",
2488 .init = clk_mux_notifier_test_init,
2489 .exit = clk_mux_notifier_test_exit,
2490 .test_cases = clk_mux_notifier_test_cases,
2491};
2492
2493static int
2494clk_mux_no_reparent_test_init(struct kunit *test)
2495{
2496 struct clk_multiple_parent_ctx *ctx;
2497 const char *parents[2] = { "parent-0", "parent-1"};
2498 int ret;
2499
2500 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2501 if (!ctx)
2502 return -ENOMEM;
2503 test->priv = ctx;
2504
2505 ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2506 &clk_dummy_rate_ops,
2507 0);
2508 ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2509 ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
2510 if (ret)
2511 return ret;
2512
2513 ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2514 &clk_dummy_rate_ops,
2515 0);
2516 ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2517 ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
2518 if (ret)
2519 return ret;
2520
2521 ctx->current_parent = 0;
2522 ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
2523 &clk_multiple_parents_no_reparent_mux_ops,
2524 0);
2525 ret = clk_hw_register(NULL, &ctx->hw);
2526 if (ret)
2527 return ret;
2528
2529 return 0;
2530}
2531
2532static void
2533clk_mux_no_reparent_test_exit(struct kunit *test)
2534{
2535 struct clk_multiple_parent_ctx *ctx = test->priv;
2536
2537 clk_hw_unregister(&ctx->hw);
2538 clk_hw_unregister(&ctx->parents_ctx[0].hw);
2539 clk_hw_unregister(&ctx->parents_ctx[1].hw);
2540}
2541
2542/*
2543 * Test that if we have a mux that cannot change parent and we call
2544 * clk_round_rate() on it with a rate that should cause it to change
2545 * parent, it won't.
2546 */
2547static void clk_mux_no_reparent_round_rate(struct kunit *test)
2548{
2549 struct clk_multiple_parent_ctx *ctx = test->priv;
2550 struct clk_hw *hw = &ctx->hw;
2551 struct clk *clk = clk_hw_get_clk(hw, NULL);
2552 struct clk *other_parent, *parent;
2553 unsigned long other_parent_rate;
2554 unsigned long parent_rate;
2555 long rounded_rate;
2556
2557 parent = clk_get_parent(clk);
2558 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2559
2560 parent_rate = clk_get_rate(parent);
2561 KUNIT_ASSERT_GT(test, parent_rate, 0);
2562
2563 other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2564 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2565 KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2566
2567 other_parent_rate = clk_get_rate(other_parent);
2568 KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2569 clk_put(other_parent);
2570
2571 rounded_rate = clk_round_rate(clk, other_parent_rate);
2572 KUNIT_ASSERT_GT(test, rounded_rate, 0);
2573 KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);
2574
2575 clk_put(clk);
2576}
2577
2578/*
2579 * Test that if we have a mux that cannot change parent and we call
2580 * clk_set_rate() on it with a rate that should cause it to change
2581 * parent, it won't.
2582 */
2583static void clk_mux_no_reparent_set_rate(struct kunit *test)
2584{
2585 struct clk_multiple_parent_ctx *ctx = test->priv;
2586 struct clk_hw *hw = &ctx->hw;
2587 struct clk *clk = clk_hw_get_clk(hw, NULL);
2588 struct clk *other_parent, *parent;
2589 unsigned long other_parent_rate;
2590 unsigned long parent_rate;
2591 unsigned long rate;
2592 int ret;
2593
2594 parent = clk_get_parent(clk);
2595 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2596
2597 parent_rate = clk_get_rate(parent);
2598 KUNIT_ASSERT_GT(test, parent_rate, 0);
2599
2600 other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2601 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2602 KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2603
2604 other_parent_rate = clk_get_rate(other_parent);
2605 KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2606 clk_put(other_parent);
2607
2608 ret = clk_set_rate(clk, other_parent_rate);
2609 KUNIT_ASSERT_EQ(test, ret, 0);
2610
2611 rate = clk_get_rate(clk);
2612 KUNIT_ASSERT_GT(test, rate, 0);
2613 KUNIT_EXPECT_EQ(test, rate, parent_rate);
2614
2615 clk_put(clk);
2616}
2617
2618static struct kunit_case clk_mux_no_reparent_test_cases[] = {
2619 KUNIT_CASE(clk_mux_no_reparent_round_rate),
2620 KUNIT_CASE(clk_mux_no_reparent_set_rate),
2621 {}
2622};
2623
2624/*
2625 * Test suite for a clock mux that isn't allowed to change parent, using
2626 * the clk_hw_determine_rate_no_reparent() helper.
2627 *
2628 * These tests exercise that helper, and the proper selection of
2629 * rates and parents.
2630 */
2631static struct kunit_suite clk_mux_no_reparent_test_suite = {
2632 .name = "clk-mux-no-reparent",
2633 .init = clk_mux_no_reparent_test_init,
2634 .exit = clk_mux_no_reparent_test_exit,
2635 .test_cases = clk_mux_no_reparent_test_cases,
2636};
2637
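/* Parameterized test case: the clk_parent_data to register the clk with */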
2638struct clk_register_clk_parent_data_test_case {
2639 const char *desc;
2640 struct clk_parent_data pdata;
2641};
2642
2643static void
2644clk_register_clk_parent_data_test_case_to_desc(
2645 const struct clk_register_clk_parent_data_test_case *t, char *desc)
2646{
2647 strcpy(desc, t->desc);
2648}
2649
2650static const struct clk_register_clk_parent_data_test_case
2651clk_register_clk_parent_data_of_cases[] = {
2652 {
2653 /*
2654 * Test that a clk registered with a struct device_node can
2655 * find a parent based on struct clk_parent_data::index.
2656 */
2657 .desc = "clk_parent_data_of_index_test",
2658 .pdata.index = 0,
2659 },
2660 {
2661 /*
2662 * Test that a clk registered with a struct device_node can
2663 * find a parent based on struct clk_parent_data::fw_name.
2664 */
2665 .desc = "clk_parent_data_of_fwname_test",
2666 .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2667 },
2668 {
2669 /*
2670 * Test that a clk registered with a struct device_node can
2671 * find a parent based on struct clk_parent_data::name.
2672 */
2673 .desc = "clk_parent_data_of_name_test",
2674 /* The index must be negative to indicate firmware not used */
2675 .pdata.index = -1,
2676 .pdata.name = CLK_PARENT_DATA_1MHZ_NAME,
2677 },
2678 {
2679 /*
2680 * Test that a clk registered with a struct device_node can
2681 * find a parent based on struct
2682 * clk_parent_data::{fw_name,name}.
2683 */
2684 .desc = "clk_parent_data_of_fwname_name_test",
2685 .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2686 .pdata.name = "not_matching",
2687 },
2688 {
2689 /*
2690 * Test that a clk registered with a struct device_node can
2691 * find a parent based on struct clk_parent_data::{index,name}.
2692 * Index takes priority.
2693 */
2694 .desc = "clk_parent_data_of_index_name_priority_test",
2695 .pdata.index = 0,
2696 .pdata.name = "not_matching",
2697 },
2698 {
2699 /*
2700 * Test that a clk registered with a struct device_node can
2701 * find a parent based on struct
2702 * clk_parent_data::{index,fw_name,name}. The fw_name takes
2703 * priority over index and name.
2704 */
2705 .desc = "clk_parent_data_of_index_fwname_name_priority_test",
2706 .pdata.index = 1,
2707 .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2708 .pdata.name = "not_matching",
2709 },
2710};
2711
2712KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases,
2713 clk_register_clk_parent_data_test_case_to_desc)
2714
2715/**
2716 * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests
2717 * @np: device node of clk under test
2718 * @hw: clk_hw for clk under test
2719 */
2720struct clk_register_clk_parent_data_of_ctx {
2721 struct device_node *np;
2722 struct clk_hw hw;
2723};
2724
2725static int clk_register_clk_parent_data_of_test_init(struct kunit *test)
2726{
2727 struct clk_register_clk_parent_data_of_ctx *ctx;
2728
2729 KUNIT_ASSERT_EQ(test, 0,
2730 of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
2731
2732 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2733 if (!ctx)
2734 return -ENOMEM;
2735 test->priv = ctx;
2736
2737 ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data");
2738 if (!ctx->np)
2739 return -ENODEV;
2740
2741 of_node_put_kunit(test, ctx->np);
2742
2743 return 0;
2744}
2745
2746/*
2747 * Test that a clk registered with a struct device_node can find a parent based on
2748 * struct clk_parent_data when the hw member isn't set.
2749 */
2750static void clk_register_clk_parent_data_of_test(struct kunit *test)
2751{
2752 struct clk_register_clk_parent_data_of_ctx *ctx = test->priv;
2753 struct clk_hw *parent_hw;
2754 const struct clk_register_clk_parent_data_test_case *test_param;
2755 struct clk_init_data init = { };
2756 struct clk *expected_parent, *actual_parent;
2757
2758 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np);
2759
2760 expected_parent = of_clk_get_kunit(test, ctx->np, 0);
2761 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2762
2763 test_param = test->param_value;
2764 init.parent_data = &test_param->pdata;
2765 init.num_parents = 1;
2766 init.name = "parent_data_of_test_clk";
2767 init.ops = &clk_dummy_single_parent_ops;
2768 ctx->hw.init = &init;
2769 KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw));
2770
2771 parent_hw = clk_hw_get_parent(&ctx->hw);
2772 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2773
2774 actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2775 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2776
2777 KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2778}
2779
2780static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = {
2781 KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test,
2782 clk_register_clk_parent_data_of_test_gen_params),
2783 {}
2784};
2785
2786/*
2787 * Test suite for registering clks with struct clk_parent_data and a struct
2788 * device_node.
2789 */
2790static struct kunit_suite clk_register_clk_parent_data_of_suite = {
2791 .name = "clk_register_clk_parent_data_of",
2792 .init = clk_register_clk_parent_data_of_test_init,
2793 .test_cases = clk_register_clk_parent_data_of_test_cases,
2794};
2795
2796/**
2797 * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
2798 * @dev: device of clk under test
2799 * @hw: clk_hw for clk under test
2800 * @pdrv: driver to attach to find @dev
2801 */
2802struct clk_register_clk_parent_data_device_ctx {
2803 struct device *dev;
2804 struct clk_hw hw;
2805 struct platform_driver pdrv;
2806};
2807
2808static inline struct clk_register_clk_parent_data_device_ctx *
2809clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
2810{
2811 return container_of(to_platform_driver(pdev->dev.driver),
2812 struct clk_register_clk_parent_data_device_ctx, pdrv);
2813}
2814
2815static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
2816{
2817 struct clk_register_clk_parent_data_device_ctx *ctx;
2818
2819 ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
2820 ctx->dev = &pdev->dev;
2821
2822 return 0;
2823}
2824
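/*
 * Register a platform driver matching the "test,clk-parent-data" node so
 * its probe can capture the struct device the clks are registered with.
 */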
2825static void clk_register_clk_parent_data_device_driver(struct kunit *test)
2826{
2827 struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
2828 static const struct of_device_id match_table[] = {
2829 { .compatible = "test,clk-parent-data" },
2830 { }
2831 };
2832
2833 ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
2834 ctx->pdrv.driver.of_match_table = match_table;
2835 ctx->pdrv.driver.name = __func__;
2836 ctx->pdrv.driver.owner = THIS_MODULE;
2837
2838 KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
2839 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
2840}
2841
2842static const struct clk_register_clk_parent_data_test_case
2843clk_register_clk_parent_data_device_cases[] = {
2844 {
2845 /*
2846 * Test that a clk registered with a struct device can find a
2847 * parent based on struct clk_parent_data::index.
2848 */
2849 .desc = "clk_parent_data_device_index_test",
2850 .pdata.index = 1,
2851 },
2852 {
2853 /*
2854 * Test that a clk registered with a struct device can find a
2855 * parent based on struct clk_parent_data::fw_name.
2856 */
2857 .desc = "clk_parent_data_device_fwname_test",
2858 .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2859 },
2860 {
2861 /*
2862 * Test that a clk registered with a struct device can find a
2863 * parent based on struct clk_parent_data::name.
2864 */
2865 .desc = "clk_parent_data_device_name_test",
2866 /* The index must be negative to indicate firmware not used */
2867 .pdata.index = -1,
2868 .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
2869 },
2870 {
2871 /*
2872 * Test that a clk registered with a struct device can find a
2873 * parent based on struct clk_parent_data::{fw_name,name}.
2874 */
2875 .desc = "clk_parent_data_device_fwname_name_test",
2876 .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2877 .pdata.name = "not_matching",
2878 },
2879 {
2880 /*
2881 * Test that a clk registered with a struct device can find a
2882 * parent based on struct clk_parent_data::{index,name}. Index
2883 * takes priority.
2884 */
2885 .desc = "clk_parent_data_device_index_name_priority_test",
2886 .pdata.index = 1,
2887 .pdata.name = "not_matching",
2888 },
2889 {
2890 /*
2891 * Test that a clk registered with a struct device can find a
2892 * parent based on struct clk_parent_data::{index,fw_name,name}.
2893 * The fw_name takes priority over index and name.
2894 */
2895 .desc = "clk_parent_data_device_index_fwname_name_priority_test",
2896 .pdata.index = 0,
2897 .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2898 .pdata.name = "not_matching",
2899 },
2900};
2901
2902KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
2903 clk_register_clk_parent_data_device_cases,
2904 clk_register_clk_parent_data_test_case_to_desc)
2905
2906/*
2907 * Test that a clk registered with a struct device can find a parent based on
2908 * struct clk_parent_data when the hw member isn't set.
2909 */
2910static void clk_register_clk_parent_data_device_test(struct kunit *test)
2911{
2912 struct clk_register_clk_parent_data_device_ctx *ctx;
2913 const struct clk_register_clk_parent_data_test_case *test_param;
2914 struct clk_hw *parent_hw;
2915 struct clk_init_data init = { };
2916 struct clk *expected_parent, *actual_parent;
2917
2918 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2919 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
2920 test->priv = ctx;
2921
2922 clk_register_clk_parent_data_device_driver(test);
2923
2924 expected_parent = clk_get_kunit(test, ctx->dev, "50");
2925 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2926
2927 test_param = test->param_value;
2928 init.parent_data = &test_param->pdata;
2929 init.num_parents = 1;
2930 init.name = "parent_data_device_test_clk";
2931 init.ops = &clk_dummy_single_parent_ops;
2932 ctx->hw.init = &init;
2933 KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
2934
2935 parent_hw = clk_hw_get_parent(&ctx->hw);
2936 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2937
2938 actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2939 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2940
2941 KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2942}
2943
2944static const struct clk_register_clk_parent_data_test_case
2945clk_register_clk_parent_data_device_hw_cases[] = {
2946 {
2947 /*
2948 * Test that a clk registered with a struct device can find a
2949 * parent based on struct clk_parent_data::hw.
2950 */
2951 .desc = "clk_parent_data_device_hw_index_test",
2952 /* The index must be negative to indicate firmware not used */
2953 .pdata.index = -1,
2954 },
2955 {
2956 /*
2957 * Test that a clk registered with a struct device can find a
2958 * parent based on struct clk_parent_data::hw when
2959 * struct clk_parent_data::fw_name is set.
2960 */
2961 .desc = "clk_parent_data_device_hw_fwname_test",
2962 .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2963 },
2964 {
2965 /*
2966 * Test that a clk registered with a struct device can find a
2967 * parent based on struct clk_parent_data::hw when struct
2968 * clk_parent_data::name is set.
2969 */
2970 .desc = "clk_parent_data_device_hw_name_test",
2971 /* The index must be negative to indicate firmware not used */
2972 .pdata.index = -1,
2973 .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
2974 },
2975 {
2976 /*
2977 * Test that a clk registered with a struct device can find a
2978 * parent based on struct clk_parent_data::hw when struct
2979 * clk_parent_data::{fw_name,name} are set.
2980 */
2981 .desc = "clk_parent_data_device_hw_fwname_name_test",
2982 .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2983 .pdata.name = "not_matching",
2984 },
2985 {
2986 /*
2987 * Test that a clk registered with a struct device can find a
2988 * parent based on struct clk_parent_data::hw when struct
2989 * clk_parent_data::index is set. The hw pointer takes
2990 * priority.
2991 */
2992 .desc = "clk_parent_data_device_hw_index_priority_test",
2993 .pdata.index = 0,
2994 },
2995 {
2996 /*
2997 * Test that a clk registered with a struct device can find a
2998 * struct clk_parent_data::{index,fw_name,name} are set.
2999 * struct clk_parent_data::{index,fwname,name} are set.
3000 * The hw pointer takes priority over everything else.
3001 */
3002 .desc = "clk_parent_data_device_hw_index_fwname_name_priority_test",
3003 .pdata.index = 0,
3004 .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
3005 .pdata.name = "not_matching",
3006 },
3007};
3008
3009KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
3010 clk_register_clk_parent_data_device_hw_cases,
3011 clk_register_clk_parent_data_test_case_to_desc)
3012
3013/*
3014 * Test that a clk registered with a struct device can find a
3015 * parent based on struct clk_parent_data::hw.
3016 */
3017static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
3018{
3019 struct clk_register_clk_parent_data_device_ctx *ctx;
3020 const struct clk_register_clk_parent_data_test_case *test_param;
3021 struct clk_dummy_context *parent;
3022 struct clk_hw *parent_hw;
3023 struct clk_parent_data pdata = { };
3024 struct clk_init_data init = { };
3025
3026 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
3027 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
3028 test->priv = ctx;
3029
3030 clk_register_clk_parent_data_device_driver(test);
3031
3032 parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
3033 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
3034
3035 parent_hw = &parent->hw;
3036 parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
3037 &clk_dummy_rate_ops, 0);
3038
3039 KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
3040
3041 test_param = test->param_value;
3042 memcpy(&pdata, &test_param->pdata, sizeof(pdata));
3043 pdata.hw = parent_hw;
3044 init.parent_data = &pdata;
3045 init.num_parents = 1;
3046 init.ops = &clk_dummy_single_parent_ops;
3047 init.name = "parent_data_device_hw_test_clk";
3048 ctx->hw.init = &init;
3049 KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
3050
3051 KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
3052}
3053
3054static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
3055 KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test,
3056 clk_register_clk_parent_data_device_test_gen_params),
3057 KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test,
3058 clk_register_clk_parent_data_device_hw_test_gen_params),
3059 {}
3060};
3061
3062static int clk_register_clk_parent_data_device_init(struct kunit *test)
3063{
3064 KUNIT_ASSERT_EQ(test, 0,
3065 of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
3066
3067 return 0;
3068}
3069
3070/*
3071 * Test suite for registering clks with struct clk_parent_data and a struct
3072 * device.
3073 */
3074static struct kunit_suite clk_register_clk_parent_data_device_suite = {
3075 .name = "clk_register_clk_parent_data_device",
3076 .init = clk_register_clk_parent_data_device_init,
3077 .test_cases = clk_register_clk_parent_data_device_test_cases,
3078};
3079
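/* The dummy clks exposed by the assigned-rates test provider */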
3080struct clk_assigned_rates_context {
3081 struct clk_dummy_context clk0;
3082 struct clk_dummy_context clk1;
3083};
3084
3085/**
3086 * struct clk_assigned_rates_test_param - Test parameters for clk_assigned_rates test
3087 * @desc: Test description
3088 * @overlay_begin: Pointer to start of DT overlay to apply for test
3089 * @overlay_end: Pointer to end of DT overlay to apply for test
3090 * @rate0: Initial rate of first clk
3091 * @rate1: Initial rate of second clk
3092 * @consumer_test: true if a consumer is being tested
3093 */
3094struct clk_assigned_rates_test_param {
3095 const char *desc;
3096 u8 *overlay_begin;
3097 u8 *overlay_end;
3098 unsigned long rate0;
3099 unsigned long rate1;
3100 bool consumer_test;
3101};
3102
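/* Fill in the overlay begin/end pointers for a test parameter */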
3103#define TEST_PARAM_OVERLAY(overlay_name) \
3104 .overlay_begin = of_overlay_begin(overlay_name), \
3105 .overlay_end = of_overlay_end(overlay_name)
3106
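/*
 * Register a dummy rate clk named @name under @np with an initial @rate
 * for the assigned-clock-rates handling to act on.
 */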
3107static void
3108clk_assigned_rates_register_clk(struct kunit *test,
3109 struct clk_dummy_context *ctx,
3110 struct device_node *np, const char *name,
3111 unsigned long rate)
3112{
3113 struct clk_init_data init = { };
3114
3115 init.name = name;
3116 init.ops = &clk_dummy_rate_ops;
3117 ctx->hw.init = &init;
3118 ctx->rate = rate;
3119
3120 KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, &ctx->hw));
3121 KUNIT_ASSERT_EQ(test, ctx->rate, rate);
3122}
3123
3124/*
3125 * Does most of the work of the test:
3126 *
3127 * 1. Apply the overlay to test
3128 * 2. Register the clk or clks to test
3129 * 3. Register the clk provider
3130 * 4. Apply clk defaults to the consumer device if this is a consumer test
3131 *
3132 * The tests will set different test_param values to test different scenarios
3133 * and validate that in their test functions.
3134 */
3135static int clk_assigned_rates_test_init(struct kunit *test)
3136{
3137 struct device_node *np, *consumer;
3138 struct clk_hw_onecell_data *data;
3139 struct clk_assigned_rates_context *ctx;
3140 u32 clk_cells;
3141 const struct clk_assigned_rates_test_param *test_param;
3142
3143 test_param = test->param_value;
3144
3145 KUNIT_ASSERT_EQ(test, 0, __of_overlay_apply_kunit(test,
3146 test_param->overlay_begin,
3147 test_param->overlay_end));
3148
3149 KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3150 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL));
3151 test->priv = ctx;
3152
3153 KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3154 np = of_find_compatible_node(NULL, NULL, "test,clk-assigned-rates"));
3155 of_node_put_kunit(test, np);
3156
3157 KUNIT_ASSERT_EQ(test, 0, of_property_read_u32(np, "#clock-cells", &clk_cells));
3158 /* Only support #clock-cells = <0> or <1> */
3159 KUNIT_ASSERT_LT(test, clk_cells, 2);
3160
3161 clk_assigned_rates_register_clk(test, &ctx->clk0, np,
3162 "test_assigned_rate0", test_param->rate0);
3163 if (clk_cells == 0) {
3164 KUNIT_ASSERT_EQ(test, 0,
3165 of_clk_add_hw_provider_kunit(test, np, of_clk_hw_simple_get,
3166 &ctx->clk0.hw));
3167 } else if (clk_cells == 1) {
3168 clk_assigned_rates_register_clk(test, &ctx->clk1, np,
3169 "test_assigned_rate1", test_param->rate1);
3170
3171 KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3172 data = kunit_kzalloc(test, struct_size(data, hws, 2), GFP_KERNEL));
3173 data->num = 2;
3174 data->hws[0] = &ctx->clk0.hw;
3175 data->hws[1] = &ctx->clk1.hw;
3176
3177 KUNIT_ASSERT_EQ(test, 0,
3178 of_clk_add_hw_provider_kunit(test, np, of_clk_hw_onecell_get, data));
3179 }
3180
3181 /* Consumers are optional */
3182 if (test_param->consumer_test) {
3183 KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3184 consumer = of_find_compatible_node(NULL, NULL, "test,clk-consumer"));
3185 of_node_put_kunit(test, consumer);
3186
3187 KUNIT_ASSERT_EQ(test, 0, of_clk_set_defaults(consumer, false));
3188 }
3189
3190 return 0;
3191}
3192
3193static void clk_assigned_rates_assigns_one(struct kunit *test)
3194{
3195 struct clk_assigned_rates_context *ctx = test->priv;
3196
3197 KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
3198}
3199
3200static void clk_assigned_rates_assigns_multiple(struct kunit *test)
3201{
3202 struct clk_assigned_rates_context *ctx = test->priv;
3203
3204 KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
3205 KUNIT_EXPECT_EQ(test, ctx->clk1.rate, ASSIGNED_RATES_1_RATE);
3206}
3207
3208static void clk_assigned_rates_skips(struct kunit *test)
3209{
3210 struct clk_assigned_rates_context *ctx = test->priv;
3211 const struct clk_assigned_rates_test_param *test_param = test->param_value;
3212
3213 KUNIT_EXPECT_NE(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
3214 KUNIT_EXPECT_EQ(test, ctx->clk0.rate, test_param->rate0);
3215}
3216
3217OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one);
3218OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one_consumer);
3219OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one);
3220OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one_consumer);
3221
3222/* Test cases that assign one rate */
3223static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_one_test_params[] = {
3224 {
3225 /*
3226 * Test that a single cell assigned-clock-rates property
3227 * assigns the rate when the property is in the provider.
3228 */
3229 .desc = "provider assigns",
3230 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one),
3231 },
3232 {
3233 /*
3234 * Test that a single cell assigned-clock-rates property
3235 * assigns the rate when the property is in the consumer.
3236 */
3237 .desc = "consumer assigns",
3238 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one_consumer),
3239 .consumer_test = true,
3240 },
3241 {
3242 /*
3243 * Test that a single cell assigned-clock-rates-u64 property
3244 * assigns the rate when the property is in the provider.
3245 */
3246 .desc = "provider assigns u64",
3247 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one),
3248 },
3249 {
3250 /*
3251 * Test that a single cell assigned-clock-rates-u64 property
3252 * assigns the rate when the property is in the consumer.
3253 */
3254 .desc = "consumer assigns u64",
3255 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one_consumer),
3256 .consumer_test = true,
3257 },
3258};
3259KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_one,
3260 clk_assigned_rates_assigns_one_test_params, desc)
3261
3262OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple);
3263OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple_consumer);
3264OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple);
3265OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple_consumer);
3266
3267/* Test cases that assign multiple rates */
3268static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_multiple_test_params[] = {
3269 {
3270 /*
3271 * Test that a multiple cell assigned-clock-rates property
3272 * assigns the rates when the property is in the provider.
3273 */
3274 .desc = "provider assigns",
3275 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple),
3276 },
3277 {
3278 /*
3279 * Test that a multiple cell assigned-clock-rates property
3280 * assigns the rates when the property is in the consumer.
3281 */
3282 .desc = "consumer assigns",
3283 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple_consumer),
3284 .consumer_test = true,
3285 },
3286 {
3287 /*
3288		 * Test that a multiple cell assigned-clock-rates-u64 property
3289		 * assigns the rates when the property is in the provider.
3290 */
3291 .desc = "provider assigns u64",
3292 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple),
3293 },
3294 {
3295 /*
3296 * Test that a multiple cell assigned-clock-rates-u64 property
3297 * assigns the rates when the property is in the consumer.
3298 */
3299 .desc = "consumer assigns u64",
3300 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple_consumer),
3301 .consumer_test = true,
3302 },
3303};
3304KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_multiple,
3305 clk_assigned_rates_assigns_multiple_test_params,
3306 desc)
3307
3308OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without);
3309OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without_consumer);
3310OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero);
3311OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero_consumer);
3312OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null);
3313OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null_consumer);
3314
3315/* Test cases that skip changing the rate due to malformed DT */
3316static const struct clk_assigned_rates_test_param clk_assigned_rates_skips_test_params[] = {
3317 {
3318 /*
3319 * Test that an assigned-clock-rates property without an assigned-clocks
3320		 * property doesn't set a rate when the property is in the provider.
3321 */
3322 .desc = "provider missing assigned-clocks",
3323 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without),
3324 .rate0 = 3000,
3325 },
3326 {
3327 /*
3328 * Test that an assigned-clock-rates property without an assigned-clocks
3329		 * property doesn't set a rate when the property is in the consumer.
3330 */
3331 .desc = "consumer missing assigned-clocks",
3332 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without_consumer),
3333 .rate0 = 3000,
3334 .consumer_test = true,
3335 },
3336 {
3337 /*
3338 * Test that an assigned-clock-rates property of zero doesn't
3339 * set a rate when the property is in the provider.
3340 */
3341 .desc = "provider assigned-clock-rates of zero",
3342 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero),
3343 .rate0 = 3000,
3344 },
3345 {
3346 /*
3347 * Test that an assigned-clock-rates property of zero doesn't
3348 * set a rate when the property is in the consumer.
3349 */
3350 .desc = "consumer assigned-clock-rates of zero",
3351 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero_consumer),
3352 .rate0 = 3000,
3353 .consumer_test = true,
3354 },
3355 {
3356 /*
3357 * Test that an assigned-clocks property with a null phandle
3358 * doesn't set a rate when the property is in the provider.
3359 */
3360 .desc = "provider assigned-clocks null phandle",
3361 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null),
3362 .rate0 = 3000,
3363 },
3364 {
3365 /*
3366 * Test that an assigned-clocks property with a null phandle
3367 * doesn't set a rate when the property is in the consumer.
3368 */
3369		.desc = "consumer assigned-clocks null phandle",
3370 TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null_consumer),
3371 .rate0 = 3000,
3372 .consumer_test = true,
3373 },
3374};
3375KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_skips,
3376 clk_assigned_rates_skips_test_params,
3377 desc)
3378
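/*
 * For reference, a hypothetical consumer node exercising this binding
 * might look roughly like the following (node and provider names are
 * made up for illustration, not taken from the test overlays):
 *
 *	consumer@0 {
 *		clocks = <&test_provider 0>;
 *		assigned-clocks = <&test_provider 0>;
 *		assigned-clock-rates = <1000000>;
 *	};
 */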
3379static struct kunit_case clk_assigned_rates_test_cases[] = {
3380 KUNIT_CASE_PARAM(clk_assigned_rates_assigns_one,
3381 clk_assigned_rates_assigns_one_gen_params),
3382 KUNIT_CASE_PARAM(clk_assigned_rates_assigns_multiple,
3383 clk_assigned_rates_assigns_multiple_gen_params),
3384 KUNIT_CASE_PARAM(clk_assigned_rates_skips,
3385 clk_assigned_rates_skips_gen_params),
3386 {}
3387};
3388
3389/*
3390 * Test suite for assigned-clock-rates{-u64} DT property.
3391 */
3392static struct kunit_suite clk_assigned_rates_suite = {
3393 .name = "clk_assigned_rates",
3394 .test_cases = clk_assigned_rates_test_cases,
3395 .init = clk_assigned_rates_test_init,
3396};
3397
3398kunit_test_suites(
3399 &clk_assigned_rates_suite,
3400 &clk_leaf_mux_set_rate_parent_test_suite,
3401 &clk_test_suite,
3402 &clk_multiple_parents_mux_test_suite,
3403 &clk_mux_no_reparent_test_suite,
3404 &clk_mux_notifier_test_suite,
3405 &clk_orphan_transparent_multiple_parent_mux_test_suite,
3406 &clk_orphan_transparent_single_parent_test_suite,
3407 &clk_orphan_two_level_root_last_test_suite,
3408 &clk_range_test_suite,
3409 &clk_range_maximize_test_suite,
3410 &clk_range_minimize_test_suite,
3411 &clk_register_clk_parent_data_of_suite,
3412 &clk_register_clk_parent_data_device_suite,
3413 &clk_single_parent_mux_test_suite,
3414 &clk_uncached_test_suite,
3415);
3416MODULE_DESCRIPTION("Kunit tests for clk framework");
3417MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kunit test for clk rate management
4 */
5#include <linux/clk.h>
6#include <linux/clk-provider.h>
7
8/* Needed for clk_hw_get_clk() */
9#include "clk.h"
10
11#include <kunit/test.h>
12
13static const struct clk_ops empty_clk_ops = { };
14
15#define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
16#define DUMMY_CLOCK_RATE_1 (142 * 1000 * 1000)
17#define DUMMY_CLOCK_RATE_2 (242 * 1000 * 1000)
18
19struct clk_dummy_context {
20 struct clk_hw hw;
21 unsigned long rate;
22};
23
24static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
25 unsigned long parent_rate)
26{
27 struct clk_dummy_context *ctx =
28 container_of(hw, struct clk_dummy_context, hw);
29
30 return ctx->rate;
31}
32
33static int clk_dummy_determine_rate(struct clk_hw *hw,
34 struct clk_rate_request *req)
35{
36 /* Just return the same rate without modifying it */
37 return 0;
38}
39
40static int clk_dummy_maximize_rate(struct clk_hw *hw,
41 struct clk_rate_request *req)
42{
43 /*
44 * If there's a maximum set, always run the clock at the maximum
45 * allowed.
46 */
47 if (req->max_rate < ULONG_MAX)
48 req->rate = req->max_rate;
49
50 return 0;
51}
52
53static int clk_dummy_minimize_rate(struct clk_hw *hw,
54 struct clk_rate_request *req)
55{
56 /*
57 * If there's a minimum set, always run the clock at the minimum
58 * allowed.
59 */
60 if (req->min_rate > 0)
61 req->rate = req->min_rate;
62
63 return 0;
64}
65
66static int clk_dummy_set_rate(struct clk_hw *hw,
67 unsigned long rate,
68 unsigned long parent_rate)
69{
70 struct clk_dummy_context *ctx =
71 container_of(hw, struct clk_dummy_context, hw);
72
73 ctx->rate = rate;
74 return 0;
75}
76
77static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
78{
79 if (index >= clk_hw_get_num_parents(hw))
80 return -EINVAL;
81
82 return 0;
83}
84
85static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
86{
87 return 0;
88}
89
90static const struct clk_ops clk_dummy_rate_ops = {
91 .recalc_rate = clk_dummy_recalc_rate,
92 .determine_rate = clk_dummy_determine_rate,
93 .set_rate = clk_dummy_set_rate,
94};
95
96static const struct clk_ops clk_dummy_maximize_rate_ops = {
97 .recalc_rate = clk_dummy_recalc_rate,
98 .determine_rate = clk_dummy_maximize_rate,
99 .set_rate = clk_dummy_set_rate,
100};
101
102static const struct clk_ops clk_dummy_minimize_rate_ops = {
103 .recalc_rate = clk_dummy_recalc_rate,
104 .determine_rate = clk_dummy_minimize_rate,
105 .set_rate = clk_dummy_set_rate,
106};
107
108static const struct clk_ops clk_dummy_single_parent_ops = {
109 /*
110 * FIXME: Even though we should probably be able to use
111 * __clk_mux_determine_rate() here, if we use it and call
112 * clk_round_rate() or clk_set_rate() with a rate lower than
113 * what all the parents can provide, it will return -EINVAL.
114 *
115 * This is due to the fact that it has the undocumented
116 * behaviour to always pick up the closest rate higher than the
117 * requested rate. If we get something lower, it thus considers
118 * that it's not acceptable and will return an error.
119 *
120 * It's somewhat inconsistent and creates a weird threshold
121 * between rates above the parent rate which would be rounded to
122 * what the parent can provide, but rates below will simply
123 * return an error.
124 */
125 .determine_rate = __clk_mux_determine_rate_closest,
126 .set_parent = clk_dummy_single_set_parent,
127 .get_parent = clk_dummy_single_get_parent,
128};
129
130struct clk_multiple_parent_ctx {
131 struct clk_dummy_context parents_ctx[2];
132 struct clk_hw hw;
133 u8 current_parent;
134};
135
136static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
137{
138 struct clk_multiple_parent_ctx *ctx =
139 container_of(hw, struct clk_multiple_parent_ctx, hw);
140
141 if (index >= clk_hw_get_num_parents(hw))
142 return -EINVAL;
143
144 ctx->current_parent = index;
145
146 return 0;
147}
148
149static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
150{
151 struct clk_multiple_parent_ctx *ctx =
152 container_of(hw, struct clk_multiple_parent_ctx, hw);
153
154 return ctx->current_parent;
155}
156
157static const struct clk_ops clk_multiple_parents_mux_ops = {
158 .get_parent = clk_multiple_parents_mux_get_parent,
159 .set_parent = clk_multiple_parents_mux_set_parent,
160 .determine_rate = __clk_mux_determine_rate_closest,
161};
162
163static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
164 .determine_rate = clk_hw_determine_rate_no_reparent,
165 .get_parent = clk_multiple_parents_mux_get_parent,
166 .set_parent = clk_multiple_parents_mux_set_parent,
167};
168
169static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
170{
171 struct clk_dummy_context *ctx;
172 struct clk_init_data init = { };
173 int ret;
174
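	/* Test-managed allocation: KUnit frees it when the test finishes */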
175 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
176 if (!ctx)
177 return -ENOMEM;
178 ctx->rate = DUMMY_CLOCK_INIT_RATE;
179 test->priv = ctx;
180
181 init.name = "test_dummy_rate";
182 init.ops = ops;
183 ctx->hw.init = &init;
184
185 ret = clk_hw_register(NULL, &ctx->hw);
186 if (ret)
187 return ret;
188
189 return 0;
190}
191
192static int clk_test_init(struct kunit *test)
193{
194 return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
195}
196
197static int clk_maximize_test_init(struct kunit *test)
198{
199 return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
200}
201
202static int clk_minimize_test_init(struct kunit *test)
203{
204 return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
205}
206
207static void clk_test_exit(struct kunit *test)
208{
209 struct clk_dummy_context *ctx = test->priv;
210
211 clk_hw_unregister(&ctx->hw);
212}
213
214/*
215 * Test that the actual rate matches what is returned by clk_get_rate()
216 */
217static void clk_test_get_rate(struct kunit *test)
218{
219 struct clk_dummy_context *ctx = test->priv;
220 struct clk_hw *hw = &ctx->hw;
221 struct clk *clk = clk_hw_get_clk(hw, NULL);
222 unsigned long rate;
223
224 rate = clk_get_rate(clk);
225 KUNIT_ASSERT_GT(test, rate, 0);
226 KUNIT_EXPECT_EQ(test, rate, ctx->rate);
227
228 clk_put(clk);
229}
230
231/*
232 * Test that, after a call to clk_set_rate(), the rate returned by
233 * clk_get_rate() matches.
234 *
235 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
236 * modify the requested rate, which is our case in clk_dummy_rate_ops.
237 */
238static void clk_test_set_get_rate(struct kunit *test)
239{
240 struct clk_dummy_context *ctx = test->priv;
241 struct clk_hw *hw = &ctx->hw;
242 struct clk *clk = clk_hw_get_clk(hw, NULL);
243 unsigned long rate;
244
245 KUNIT_ASSERT_EQ(test,
246 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
247 0);
248
249 rate = clk_get_rate(clk);
250 KUNIT_ASSERT_GT(test, rate, 0);
251 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
252
253 clk_put(clk);
254}
255
256/*
257 * Test that, after several calls to clk_set_rate(), the rate returned
258 * by clk_get_rate() matches the last one.
259 *
260 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
261 * modify the requested rate, which is our case in clk_dummy_rate_ops.
262 */
263static void clk_test_set_set_get_rate(struct kunit *test)
264{
265 struct clk_dummy_context *ctx = test->priv;
266 struct clk_hw *hw = &ctx->hw;
267 struct clk *clk = clk_hw_get_clk(hw, NULL);
268 unsigned long rate;
269
270 KUNIT_ASSERT_EQ(test,
271 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
272 0);
273
274 KUNIT_ASSERT_EQ(test,
275 clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
276 0);
277
278 rate = clk_get_rate(clk);
279 KUNIT_ASSERT_GT(test, rate, 0);
280 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
281
282 clk_put(clk);
283}
284
285/*
286 * Test that clk_round_rate() and clk_set_rate() are consistent and will
287 * return the same frequency.
288 */
289static void clk_test_round_set_get_rate(struct kunit *test)
290{
291 struct clk_dummy_context *ctx = test->priv;
292 struct clk_hw *hw = &ctx->hw;
293 struct clk *clk = clk_hw_get_clk(hw, NULL);
294 unsigned long set_rate;
295 long rounded_rate;
296
297 rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
298 KUNIT_ASSERT_GT(test, rounded_rate, 0);
299 KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
300
301 KUNIT_ASSERT_EQ(test,
302 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
303 0);
304
305 set_rate = clk_get_rate(clk);
306 KUNIT_ASSERT_GT(test, set_rate, 0);
307 KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
308
309 clk_put(clk);
310}
311
312static struct kunit_case clk_test_cases[] = {
313 KUNIT_CASE(clk_test_get_rate),
314 KUNIT_CASE(clk_test_set_get_rate),
315 KUNIT_CASE(clk_test_set_set_get_rate),
316 KUNIT_CASE(clk_test_round_set_get_rate),
317 {}
318};
319
320/*
321 * Test suite for a basic rate clock, without any parent.
322 *
323 * These tests exercise the rate API with simple scenarios
324 */
325static struct kunit_suite clk_test_suite = {
326 .name = "clk-test",
327 .init = clk_test_init,
328 .exit = clk_test_exit,
329 .test_cases = clk_test_cases,
330};
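/*
 * Note: a suite such as the one above only runs once it has been
 * registered with KUnit. A minimal sketch of the registration, assuming
 * the usual kunit_test_suites() call at the end of this file:
 *
 *	kunit_test_suites(
 *		&clk_test_suite,
 *		&clk_uncached_test_suite,
 *		...
 *	);
 */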
331
332static int clk_uncached_test_init(struct kunit *test)
333{
334 struct clk_dummy_context *ctx;
335 int ret;
336
337 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
338 if (!ctx)
339 return -ENOMEM;
340 test->priv = ctx;
341
342 ctx->rate = DUMMY_CLOCK_INIT_RATE;
343 ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
344 &clk_dummy_rate_ops,
345 CLK_GET_RATE_NOCACHE);
346
347 ret = clk_hw_register(NULL, &ctx->hw);
348 if (ret)
349 return ret;
350
351 return 0;
352}
353
354/*
355 * Test that for an uncached clock, the clock framework doesn't cache
356 * the rate and clk_get_rate() will return the underlying clock rate
357 * even if it changed.
358 */
359static void clk_test_uncached_get_rate(struct kunit *test)
360{
361 struct clk_dummy_context *ctx = test->priv;
362 struct clk_hw *hw = &ctx->hw;
363 struct clk *clk = clk_hw_get_clk(hw, NULL);
364 unsigned long rate;
365
366 rate = clk_get_rate(clk);
367 KUNIT_ASSERT_GT(test, rate, 0);
368 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
369
370 /* We change the rate behind the clock framework's back */
371 ctx->rate = DUMMY_CLOCK_RATE_1;
372 rate = clk_get_rate(clk);
373 KUNIT_ASSERT_GT(test, rate, 0);
374 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
375
376 clk_put(clk);
377}
378
379/*
380 * Test that for an uncached clock, clk_set_rate_range() will work
381 * properly if the rate hasn't changed.
382 */
383static void clk_test_uncached_set_range(struct kunit *test)
384{
385 struct clk_dummy_context *ctx = test->priv;
386 struct clk_hw *hw = &ctx->hw;
387 struct clk *clk = clk_hw_get_clk(hw, NULL);
388 unsigned long rate;
389
390 KUNIT_ASSERT_EQ(test,
391 clk_set_rate_range(clk,
392 DUMMY_CLOCK_RATE_1,
393 DUMMY_CLOCK_RATE_2),
394 0);
395
396 rate = clk_get_rate(clk);
397 KUNIT_ASSERT_GT(test, rate, 0);
398 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
399 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
400
401 clk_put(clk);
402}
403
404/*
405 * Test that for an uncached clock, clk_set_rate_range() will work
406 * properly if the rate has changed in hardware.
407 *
408 * In this case, it means that if the rate wasn't initially in the range
409 * we're trying to set, but got changed at some point into the range
410 * without the kernel knowing about it, its rate shouldn't be affected.
411 */
412static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
413{
414 struct clk_dummy_context *ctx = test->priv;
415 struct clk_hw *hw = &ctx->hw;
416 struct clk *clk = clk_hw_get_clk(hw, NULL);
417 unsigned long rate;
418
419 /* We change the rate behind the clock framework's back */
420 ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
421 KUNIT_ASSERT_EQ(test,
422 clk_set_rate_range(clk,
423 DUMMY_CLOCK_RATE_1,
424 DUMMY_CLOCK_RATE_2),
425 0);
426
427 rate = clk_get_rate(clk);
428 KUNIT_ASSERT_GT(test, rate, 0);
429 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
430
431 clk_put(clk);
432}
433
434static struct kunit_case clk_uncached_test_cases[] = {
435 KUNIT_CASE(clk_test_uncached_get_rate),
436 KUNIT_CASE(clk_test_uncached_set_range),
437 KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
438 {}
439};
440
441/*
442 * Test suite for a basic, uncached, rate clock, without any parent.
443 *
444 * These tests exercise the rate API with simple scenarios
445 */
446static struct kunit_suite clk_uncached_test_suite = {
447 .name = "clk-uncached-test",
448 .init = clk_uncached_test_init,
449 .exit = clk_test_exit,
450 .test_cases = clk_uncached_test_cases,
451};
452
453static int
454clk_multiple_parents_mux_test_init(struct kunit *test)
455{
456 struct clk_multiple_parent_ctx *ctx;
457 const char *parents[2] = { "parent-0", "parent-1"};
458 int ret;
459
460 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
461 if (!ctx)
462 return -ENOMEM;
463 test->priv = ctx;
464
465 ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
466 &clk_dummy_rate_ops,
467 0);
468 ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
469 ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
470 if (ret)
471 return ret;
472
473 ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
474 &clk_dummy_rate_ops,
475 0);
476 ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
477 ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
478 if (ret)
479 return ret;
480
481 ctx->current_parent = 0;
482 ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
483 &clk_multiple_parents_mux_ops,
484 CLK_SET_RATE_PARENT);
485 ret = clk_hw_register(NULL, &ctx->hw);
486 if (ret)
487 return ret;
488
489 return 0;
490}
491
492static void
493clk_multiple_parents_mux_test_exit(struct kunit *test)
494{
495 struct clk_multiple_parent_ctx *ctx = test->priv;
496
497 clk_hw_unregister(&ctx->hw);
498 clk_hw_unregister(&ctx->parents_ctx[0].hw);
499 clk_hw_unregister(&ctx->parents_ctx[1].hw);
500}
501
502/*
503 * Test that for a clock with multiple parents, clk_get_parent()
504 * actually returns the current one.
505 */
506static void
507clk_test_multiple_parents_mux_get_parent(struct kunit *test)
508{
509 struct clk_multiple_parent_ctx *ctx = test->priv;
510 struct clk_hw *hw = &ctx->hw;
511 struct clk *clk = clk_hw_get_clk(hw, NULL);
512 struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
513
514 KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
515
516 clk_put(parent);
517 clk_put(clk);
518}
519
520/*
521 * Test that for a clock with multiple parents, clk_has_parent()
522 * actually reports all of them as parents.
523 */
524static void
525clk_test_multiple_parents_mux_has_parent(struct kunit *test)
526{
527 struct clk_multiple_parent_ctx *ctx = test->priv;
528 struct clk_hw *hw = &ctx->hw;
529 struct clk *clk = clk_hw_get_clk(hw, NULL);
530 struct clk *parent;
531
532 parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
533 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
534 clk_put(parent);
535
536 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
537 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
538 clk_put(parent);
539
540 clk_put(clk);
541}
542
543/*
544 * Test that for a clock with multiple parents, if we set a range on
545 * that clock and the parent is changed, its rate after the reparenting
546 * is still within the range we asked for.
547 *
548 * FIXME: clk_set_parent() only does the reparenting but doesn't
549 * reevaluate whether the new clock rate is within its boundaries or
550 * not.
551 */
552static void
553clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
554{
555 struct clk_multiple_parent_ctx *ctx = test->priv;
556 struct clk_hw *hw = &ctx->hw;
557 struct clk *clk = clk_hw_get_clk(hw, NULL);
558 struct clk *parent1, *parent2;
559 unsigned long rate;
560 int ret;
561
562 kunit_skip(test, "This needs to be fixed in the core.");
563
564 parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
565 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
566 KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
567
568 parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
569 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
570
571 ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
572 KUNIT_ASSERT_EQ(test, ret, 0);
573
574 ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
575 KUNIT_ASSERT_EQ(test, ret, 0);
576
577 ret = clk_set_rate_range(clk,
578 DUMMY_CLOCK_RATE_1 - 1000,
579 DUMMY_CLOCK_RATE_1 + 1000);
580 KUNIT_ASSERT_EQ(test, ret, 0);
581
582 ret = clk_set_parent(clk, parent2);
583 KUNIT_ASSERT_EQ(test, ret, 0);
584
585 rate = clk_get_rate(clk);
586 KUNIT_ASSERT_GT(test, rate, 0);
587 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
588 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
589
590 clk_put(parent2);
591 clk_put(parent1);
592 clk_put(clk);
593}
594
595static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
596 KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
597 KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
598 KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
599 {}
600};
601
602/*
603 * Test suite for a basic mux clock with two parents, with
604 * CLK_SET_RATE_PARENT on the child.
605 *
606 * These tests exercise the consumer API and check that the states of the
607 * child and parents are sane and consistent.
608 */
609static struct kunit_suite
610clk_multiple_parents_mux_test_suite = {
611 .name = "clk-multiple-parents-mux-test",
612 .init = clk_multiple_parents_mux_test_init,
613 .exit = clk_multiple_parents_mux_test_exit,
614 .test_cases = clk_multiple_parents_mux_test_cases,
615};
616
617static int
618clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
619{
620 struct clk_multiple_parent_ctx *ctx;
621 const char *parents[2] = { "missing-parent", "proper-parent"};
622 int ret;
623
624 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
625 if (!ctx)
626 return -ENOMEM;
627 test->priv = ctx;
628
629 ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
630 &clk_dummy_rate_ops,
631 0);
632 ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
633 ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
634 if (ret)
635 return ret;
636
637 ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
638 &clk_multiple_parents_mux_ops,
639 CLK_SET_RATE_PARENT);
640 ret = clk_hw_register(NULL, &ctx->hw);
641 if (ret)
642 return ret;
643
644 return 0;
645}
646
647static void
648clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
649{
650 struct clk_multiple_parent_ctx *ctx = test->priv;
651
652 clk_hw_unregister(&ctx->hw);
653 clk_hw_unregister(&ctx->parents_ctx[1].hw);
654}
655
656/*
657 * Test that, for a mux whose current parent hasn't been registered yet and is
658 * thus orphan, clk_get_parent() will return NULL.
659 */
660static void
661clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
662{
663 struct clk_multiple_parent_ctx *ctx = test->priv;
664 struct clk_hw *hw = &ctx->hw;
665 struct clk *clk = clk_hw_get_clk(hw, NULL);
666
667 KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
668
669 clk_put(clk);
670}
671
672/*
673 * Test that, for a mux whose current parent hasn't been registered yet,
674 * calling clk_set_parent() to a valid parent will properly update the
675 * mux parent and its orphan status.
676 */
677static void
678clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
679{
680 struct clk_multiple_parent_ctx *ctx = test->priv;
681 struct clk_hw *hw = &ctx->hw;
682 struct clk *clk = clk_hw_get_clk(hw, NULL);
683 struct clk *parent, *new_parent;
684 int ret;
685
686 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
687 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
688
689 ret = clk_set_parent(clk, parent);
690 KUNIT_ASSERT_EQ(test, ret, 0);
691
692 new_parent = clk_get_parent(clk);
693 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_parent);
694 KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
695
696 clk_put(parent);
697 clk_put(clk);
698}
699
700/*
701 * Test that, for a mux that started orphan but got switched to a valid
702 * parent, calling clk_drop_range() on the mux won't affect the parent
703 * rate.
704 */
705static void
706clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
707{
708 struct clk_multiple_parent_ctx *ctx = test->priv;
709 struct clk_hw *hw = &ctx->hw;
710 struct clk *clk = clk_hw_get_clk(hw, NULL);
711 struct clk *parent;
712 unsigned long parent_rate, new_parent_rate;
713 int ret;
714
715 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
716 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
717
718 parent_rate = clk_get_rate(parent);
719 KUNIT_ASSERT_GT(test, parent_rate, 0);
720
721 ret = clk_set_parent(clk, parent);
722 KUNIT_ASSERT_EQ(test, ret, 0);
723
724 ret = clk_drop_range(clk);
725 KUNIT_ASSERT_EQ(test, ret, 0);
726
727 new_parent_rate = clk_get_rate(clk);
728 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
729 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
730
731 clk_put(parent);
732 clk_put(clk);
733}
734
735/*
736 * Test that, for a mux that started orphan but got switched to a valid
737 * parent, the rate of the mux and its new parent are consistent.
738 */
739static void
740clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
741{
742 struct clk_multiple_parent_ctx *ctx = test->priv;
743 struct clk_hw *hw = &ctx->hw;
744 struct clk *clk = clk_hw_get_clk(hw, NULL);
745 struct clk *parent;
746 unsigned long parent_rate, rate;
747 int ret;
748
749 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
750 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
751
752 parent_rate = clk_get_rate(parent);
753 KUNIT_ASSERT_GT(test, parent_rate, 0);
754
755 ret = clk_set_parent(clk, parent);
756 KUNIT_ASSERT_EQ(test, ret, 0);
757
758 rate = clk_get_rate(clk);
759 KUNIT_ASSERT_GT(test, rate, 0);
760 KUNIT_EXPECT_EQ(test, parent_rate, rate);
761
762 clk_put(parent);
763 clk_put(clk);
764}
765
766/*
767 * Test that, for a mux that started orphan but got switched to a valid
768 * parent, calling clk_put() on the mux won't affect the parent rate.
769 */
770static void
771clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
772{
773 struct clk_multiple_parent_ctx *ctx = test->priv;
774 struct clk *clk, *parent;
775 unsigned long parent_rate, new_parent_rate;
776 int ret;
777
778 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
779 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
780
781 clk = clk_hw_get_clk(&ctx->hw, NULL);
782 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
783
784 parent_rate = clk_get_rate(parent);
785 KUNIT_ASSERT_GT(test, parent_rate, 0);
786
787 ret = clk_set_parent(clk, parent);
788 KUNIT_ASSERT_EQ(test, ret, 0);
789
790 clk_put(clk);
791
792 new_parent_rate = clk_get_rate(parent);
793 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
794 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
795
796 clk_put(parent);
797}
798
799/*
800 * Test that, for a mux that started orphan but got switched to a valid
801 * parent, calling clk_set_rate_range() will affect the parent state if
802 * its rate is out of range.
803 */
804static void
805clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
806{
807 struct clk_multiple_parent_ctx *ctx = test->priv;
808 struct clk_hw *hw = &ctx->hw;
809 struct clk *clk = clk_hw_get_clk(hw, NULL);
810 struct clk *parent;
811 unsigned long rate;
812 int ret;
813
814 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
815 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
816
817 ret = clk_set_parent(clk, parent);
818 KUNIT_ASSERT_EQ(test, ret, 0);
819
820 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
821 KUNIT_ASSERT_EQ(test, ret, 0);
822
823 rate = clk_get_rate(clk);
824 KUNIT_ASSERT_GT(test, rate, 0);
825 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
826 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
827
828 clk_put(parent);
829 clk_put(clk);
830}
831
832/*
833 * Test that, for a mux that started orphan but got switched to a valid
834 * parent, calling clk_set_rate_range() won't affect the parent state if
835 * its rate is within range.
836 */
837static void
838clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
839{
840 struct clk_multiple_parent_ctx *ctx = test->priv;
841 struct clk_hw *hw = &ctx->hw;
842 struct clk *clk = clk_hw_get_clk(hw, NULL);
843 struct clk *parent;
844 unsigned long parent_rate, new_parent_rate;
845 int ret;
846
847 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
848 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
849
850 parent_rate = clk_get_rate(parent);
851 KUNIT_ASSERT_GT(test, parent_rate, 0);
852
853 ret = clk_set_parent(clk, parent);
854 KUNIT_ASSERT_EQ(test, ret, 0);
855
856 ret = clk_set_rate_range(clk,
857 DUMMY_CLOCK_INIT_RATE - 1000,
858 DUMMY_CLOCK_INIT_RATE + 1000);
859 KUNIT_ASSERT_EQ(test, ret, 0);
860
861 new_parent_rate = clk_get_rate(parent);
862 KUNIT_ASSERT_GT(test, new_parent_rate, 0);
863 KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
864
865 clk_put(parent);
866 clk_put(clk);
867}
868
869/*
870 * Test that, for a mux whose current parent hasn't been registered yet,
871 * calling clk_set_rate_range() will succeed, and will be taken into
872 * account when rounding a rate.
873 */
874static void
875clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
876{
877 struct clk_multiple_parent_ctx *ctx = test->priv;
878 struct clk_hw *hw = &ctx->hw;
879 struct clk *clk = clk_hw_get_clk(hw, NULL);
880 long rate;
881 int ret;
882
883 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
884 KUNIT_ASSERT_EQ(test, ret, 0);
885
886 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
887 KUNIT_ASSERT_GT(test, rate, 0);
888 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
889 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
890
891 clk_put(clk);
892}
893
894/*
895 * Test that, for a mux that started orphan, was assigned a rate and
896 * then got switched to a valid parent, its rate is eventually within
897 * range.
898 *
899 * FIXME: Even though we update the rate as part of clk_set_parent(), we
900 * don't evaluate whether that new rate is within range and needs to be
901 * adjusted.
902 */
903static void
904clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
905{
906 struct clk_multiple_parent_ctx *ctx = test->priv;
907 struct clk_hw *hw = &ctx->hw;
908 struct clk *clk = clk_hw_get_clk(hw, NULL);
909 struct clk *parent;
910 unsigned long rate;
911 int ret;
912
913 kunit_skip(test, "This needs to be fixed in the core.");
914
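	/* Provider-side range request, made while the mux is still orphan */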
915 clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
916
917 parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
918 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
919
920 ret = clk_set_parent(clk, parent);
921 KUNIT_ASSERT_EQ(test, ret, 0);
922
923 rate = clk_get_rate(clk);
924 KUNIT_ASSERT_GT(test, rate, 0);
925 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
926 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
927
928 clk_put(parent);
929 clk_put(clk);
930}
931
932static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
933 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
934 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
935 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
936 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
937 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
938 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
939 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
940 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
941 KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
942 {}
943};
944
945/*
946 * Test suite for a basic mux clock with two parents. The default parent
947 * isn't registered, only the second parent is. By default, the clock
948 * will thus be orphan.
949 *
950 * These tests exercise the behaviour of the consumer API when dealing
951 * with an orphan clock, and how we deal with the transition to a valid
952 * parent.
953 */
954static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
955 .name = "clk-orphan-transparent-multiple-parent-mux-test",
956 .init = clk_orphan_transparent_multiple_parent_mux_test_init,
957 .exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
958 .test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
959};
960
961struct clk_single_parent_ctx {
962 struct clk_dummy_context parent_ctx;
963 struct clk_hw hw;
964};
965
966static int clk_single_parent_mux_test_init(struct kunit *test)
967{
968 struct clk_single_parent_ctx *ctx;
969 int ret;
970
971 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
972 if (!ctx)
973 return -ENOMEM;
974 test->priv = ctx;
975
976 ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
977 ctx->parent_ctx.hw.init =
978 CLK_HW_INIT_NO_PARENT("parent-clk",
979 &clk_dummy_rate_ops,
980 0);
981
982 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
983 if (ret)
984 return ret;
985
986 ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
987 &clk_dummy_single_parent_ops,
988 CLK_SET_RATE_PARENT);
989
990 ret = clk_hw_register(NULL, &ctx->hw);
991 if (ret)
992 return ret;
993
994 return 0;
995}
996
997static void
998clk_single_parent_mux_test_exit(struct kunit *test)
999{
1000 struct clk_single_parent_ctx *ctx = test->priv;
1001
1002 clk_hw_unregister(&ctx->hw);
1003 clk_hw_unregister(&ctx->parent_ctx.hw);
1004}
1005
1006/*
1007 * Test that for a clock with a single parent, clk_get_parent() actually
1008 * returns the parent.
1009 */
1010static void
1011clk_test_single_parent_mux_get_parent(struct kunit *test)
1012{
1013 struct clk_single_parent_ctx *ctx = test->priv;
1014 struct clk_hw *hw = &ctx->hw;
1015 struct clk *clk = clk_hw_get_clk(hw, NULL);
1016 struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1017
1018 KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
1019
1020 clk_put(parent);
1021 clk_put(clk);
1022}
1023
1024/*
1025 * Test that for a clock with a single parent, clk_has_parent() actually
1026 * reports it as a parent.
1027 */
1028static void
1029clk_test_single_parent_mux_has_parent(struct kunit *test)
1030{
1031 struct clk_single_parent_ctx *ctx = test->priv;
1032 struct clk_hw *hw = &ctx->hw;
1033 struct clk *clk = clk_hw_get_clk(hw, NULL);
1034 struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1035
1036 KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1037
1038 clk_put(parent);
1039 clk_put(clk);
1040}
1041
1042/*
1043 * Test that for a clock that can't modify its rate and with a single
1044 * parent, if we set disjoint ranges on the parent and then on the child,
1045 * the second call will return an error.
1046 *
1047 * FIXME: clk_set_rate_range() only considers the current clock when
1048 * evaluating whether ranges are disjoint, and not the upstream clocks'
1049 * ranges.
1050 */
1051static void
1052clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1053{
1054 struct clk_single_parent_ctx *ctx = test->priv;
1055 struct clk_hw *hw = &ctx->hw;
1056 struct clk *clk = clk_hw_get_clk(hw, NULL);
1057 struct clk *parent;
1058 int ret;
1059
1060 kunit_skip(test, "This needs to be fixed in the core.");
1061
1062 parent = clk_get_parent(clk);
1063 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1064
1065 ret = clk_set_rate_range(parent, 1000, 2000);
1066 KUNIT_ASSERT_EQ(test, ret, 0);
1067
1068 ret = clk_set_rate_range(clk, 3000, 4000);
1069 KUNIT_EXPECT_LT(test, ret, 0);
1070
1071 clk_put(clk);
1072}
1073
1074/*
1075 * Test that for a clock that can't modify its rate and with a single
1076 * parent, if we set disjoint ranges on the child and then on the parent,
1077 * the second call will return an error.
1078 *
1079 * FIXME: clk_set_rate_range() only considers the current clock when
1080 * evaluating whether ranges are disjoint, and not the downstream clocks'
1081 * ranges.
1082 */
1083static void
1084clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1085{
1086 struct clk_single_parent_ctx *ctx = test->priv;
1087 struct clk_hw *hw = &ctx->hw;
1088 struct clk *clk = clk_hw_get_clk(hw, NULL);
1089 struct clk *parent;
1090 int ret;
1091
1092 kunit_skip(test, "This needs to be fixed in the core.");
1093
1094 parent = clk_get_parent(clk);
1095 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1096
1097 ret = clk_set_rate_range(clk, 1000, 2000);
1098 KUNIT_ASSERT_EQ(test, ret, 0);
1099
1100 ret = clk_set_rate_range(parent, 3000, 4000);
1101 KUNIT_EXPECT_LT(test, ret, 0);
1102
1103 clk_put(clk);
1104}
1105
1106/*
1107 * Test that for a clock that can't modify its rate and with a single
1108 * parent, if we set a range on the parent and then call
1109 * clk_round_rate(), the boundaries of the parent are taken into
1110 * account.
1111 */
1112static void
1113clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1114{
1115 struct clk_single_parent_ctx *ctx = test->priv;
1116 struct clk_hw *hw = &ctx->hw;
1117 struct clk *clk = clk_hw_get_clk(hw, NULL);
1118 struct clk *parent;
1119 long rate;
1120 int ret;
1121
1122 parent = clk_get_parent(clk);
1123 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1124
1125 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1126 KUNIT_ASSERT_EQ(test, ret, 0);
1127
1128 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1129 KUNIT_ASSERT_GT(test, rate, 0);
1130 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1131 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1132
1133 clk_put(clk);
1134}
1135
1136/*
1137 * Test that for a clock that can't modify its rate and with a single
1138 * parent, if we set a range on the parent and a more restrictive one on
1139 * the child, and then call clk_round_rate(), the boundaries of the
1140 * two clocks are taken into account.
1141 */
1142static void
1143clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1144{
1145 struct clk_single_parent_ctx *ctx = test->priv;
1146 struct clk_hw *hw = &ctx->hw;
1147 struct clk *clk = clk_hw_get_clk(hw, NULL);
1148 struct clk *parent;
1149 long rate;
1150 int ret;
1151
1152 parent = clk_get_parent(clk);
1153 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1154
1155 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1156 KUNIT_ASSERT_EQ(test, ret, 0);
1157
1158 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1159 KUNIT_ASSERT_EQ(test, ret, 0);
1160
1161 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1162 KUNIT_ASSERT_GT(test, rate, 0);
1163 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1164 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1165
1166 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1167 KUNIT_ASSERT_GT(test, rate, 0);
1168 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1169 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1170
1171 clk_put(clk);
1172}
1173
1174/*
1175 * Test that for a clock that can't modify its rate and with a single
1176 * parent, if we set a range on the child and a more restrictive one on
1177 * the parent, and then call clk_round_rate(), the boundaries of the
1178 * two clocks are taken into account.
1179 */
1180static void
1181clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1182{
1183 struct clk_single_parent_ctx *ctx = test->priv;
1184 struct clk_hw *hw = &ctx->hw;
1185 struct clk *clk = clk_hw_get_clk(hw, NULL);
1186 struct clk *parent;
1187 long rate;
1188 int ret;
1189
1190 parent = clk_get_parent(clk);
1191 KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1192
1193 ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1194 KUNIT_ASSERT_EQ(test, ret, 0);
1195
1196 ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1197 KUNIT_ASSERT_EQ(test, ret, 0);
1198
1199 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1200 KUNIT_ASSERT_GT(test, rate, 0);
1201 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1202 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1203
1204 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1205 KUNIT_ASSERT_GT(test, rate, 0);
1206 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1207 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1208
1209 clk_put(clk);
1210}
1211
1212static struct kunit_case clk_single_parent_mux_test_cases[] = {
1213 KUNIT_CASE(clk_test_single_parent_mux_get_parent),
1214 KUNIT_CASE(clk_test_single_parent_mux_has_parent),
1215 KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
1216 KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
1217 KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
1218 KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
1219 KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
1220 {}
1221};
1222
1223/*
1224 * Test suite for a basic mux clock with one parent, with
1225 * CLK_SET_RATE_PARENT on the child.
1226 *
1227 * These tests exercise the consumer API and check that the states of the
1228 * child and parent are sane and consistent.
1229 */
1230static struct kunit_suite
1231clk_single_parent_mux_test_suite = {
1232 .name = "clk-single-parent-mux-test",
1233 .init = clk_single_parent_mux_test_init,
1234 .exit = clk_single_parent_mux_test_exit,
1235 .test_cases = clk_single_parent_mux_test_cases,
1236};
1237
1238static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1239{
1240 struct clk_single_parent_ctx *ctx;
1241 struct clk_init_data init = { };
1242 const char * const parents[] = { "orphan_parent" };
1243 int ret;
1244
1245 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1246 if (!ctx)
1247 return -ENOMEM;
1248 test->priv = ctx;
1249
1250 init.name = "test_orphan_dummy_parent";
1251 init.ops = &clk_dummy_single_parent_ops;
1252 init.parent_names = parents;
1253 init.num_parents = ARRAY_SIZE(parents);
1254 init.flags = CLK_SET_RATE_PARENT;
1255 ctx->hw.init = &init;
1256
1257 ret = clk_hw_register(NULL, &ctx->hw);
1258 if (ret)
1259 return ret;
1260
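	/*
	 * clk_hw_register() above copied what it needed out of @init, so
	 * the same structure can be reused to describe the parent clock.
	 */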
1261 memset(&init, 0, sizeof(init));
1262 init.name = "orphan_parent";
1263 init.ops = &clk_dummy_rate_ops;
1264 ctx->parent_ctx.hw.init = &init;
1265 ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1266
1267 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1268 if (ret)
1269 return ret;
1270
1271 return 0;
1272}
1273
1274/*
1275 * Test that a mux-only clock, with an initial rate within a range,
1276 * will still have the same rate after the range has been enforced.
1277 *
1278 * See:
1279 * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1280 */
1281static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1282{
1283 struct clk_single_parent_ctx *ctx = test->priv;
1284 struct clk_hw *hw = &ctx->hw;
1285 struct clk *clk = clk_hw_get_clk(hw, NULL);
1286 unsigned long rate, new_rate;
1287
1288 rate = clk_get_rate(clk);
1289 KUNIT_ASSERT_GT(test, rate, 0);
1290
1291 KUNIT_ASSERT_EQ(test,
1292 clk_set_rate_range(clk,
1293 ctx->parent_ctx.rate - 1000,
1294 ctx->parent_ctx.rate + 1000),
1295 0);
1296
1297 new_rate = clk_get_rate(clk);
1298 KUNIT_ASSERT_GT(test, new_rate, 0);
1299 KUNIT_EXPECT_EQ(test, rate, new_rate);
1300
1301 clk_put(clk);
1302}
1303
1304static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
1305 KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
1306 {}
1307};
1308
1309/*
1310 * Test suite for a basic mux clock with one parent. The parent is
1311 * registered after its child. The clock will thus be an orphan when
1312 * registered, but will no longer be when the tests run.
1313 *
1314 * These tests make sure a clock that used to be orphan has a sane,
1315 * consistent behaviour.
1316 */
1317static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
1318 .name = "clk-orphan-transparent-single-parent-test",
1319 .init = clk_orphan_transparent_single_parent_mux_test_init,
1320 .exit = clk_single_parent_mux_test_exit,
1321 .test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
1322};
1323
1324struct clk_single_parent_two_lvl_ctx {
1325 struct clk_dummy_context parent_parent_ctx;
1326 struct clk_dummy_context parent_ctx;
1327 struct clk_hw hw;
1328};
1329
1330static int
1331clk_orphan_two_level_root_last_test_init(struct kunit *test)
1332{
1333 struct clk_single_parent_two_lvl_ctx *ctx;
1334 int ret;
1335
1336 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1337 if (!ctx)
1338 return -ENOMEM;
1339 test->priv = ctx;
1340
1341 ctx->parent_ctx.hw.init =
1342 CLK_HW_INIT("intermediate-parent",
1343 "root-parent",
1344 &clk_dummy_single_parent_ops,
1345 CLK_SET_RATE_PARENT);
1346 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1347 if (ret)
1348 return ret;
1349
1350 ctx->hw.init =
1351 CLK_HW_INIT("test-clk", "intermediate-parent",
1352 &clk_dummy_single_parent_ops,
1353 CLK_SET_RATE_PARENT);
1354 ret = clk_hw_register(NULL, &ctx->hw);
1355 if (ret)
1356 return ret;
1357
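	/* Register the root clock last so the two clocks above start orphan */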
1358 ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1359 ctx->parent_parent_ctx.hw.init =
1360 CLK_HW_INIT_NO_PARENT("root-parent",
1361 &clk_dummy_rate_ops,
1362 0);
1363 ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1364 if (ret)
1365 return ret;
1366
1367 return 0;
1368}
1369
1370static void
1371clk_orphan_two_level_root_last_test_exit(struct kunit *test)
1372{
1373 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1374
1375 clk_hw_unregister(&ctx->hw);
1376 clk_hw_unregister(&ctx->parent_ctx.hw);
1377 clk_hw_unregister(&ctx->parent_parent_ctx.hw);
1378}
1379
1380/*
1381 * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1382 * will return the proper rate.
1383 */
1384static void
1385clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1386{
1387 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1388 struct clk_hw *hw = &ctx->hw;
1389 struct clk *clk = clk_hw_get_clk(hw, NULL);
1390 unsigned long rate;
1391
1392 rate = clk_get_rate(clk);
1393 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1394
1395 clk_put(clk);
1396}
1397
1398/*
1399 * Test that, for a clock whose parent used to be orphan,
1400 * clk_set_rate_range() won't affect its rate if it is already within
1401 * range.
1402 *
1403 * See (for Exynos 4210):
1404 * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1405 */
1406static void
1407clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1408{
1409 struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1410 struct clk_hw *hw = &ctx->hw;
1411 struct clk *clk = clk_hw_get_clk(hw, NULL);
1412 unsigned long rate;
1413 int ret;
1414
1415 ret = clk_set_rate_range(clk,
1416 DUMMY_CLOCK_INIT_RATE - 1000,
1417 DUMMY_CLOCK_INIT_RATE + 1000);
1418 KUNIT_ASSERT_EQ(test, ret, 0);
1419
1420 rate = clk_get_rate(clk);
1421 KUNIT_ASSERT_GT(test, rate, 0);
1422 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1423
1424 clk_put(clk);
1425}
1426
1427static struct kunit_case
1428clk_orphan_two_level_root_last_test_cases[] = {
1429 KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
1430 KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
1431 {}
1432};
1433
1434/*
1435 * Test suite for a basic, transparent, clock with a parent that is also
1436 * such a clock. The parent's parent is registered last, while the
1437 * parent and its child are registered in that order. The intermediate
1438 * and leaf clocks will thus be orphan when registered, but the leaf
1439 * clock itself will always have its parent and will never be
1440 * reparented. Indeed, it's only orphan because its parent is.
1441 *
1442 * These tests exercise the behaviour of the consumer API when dealing
1443 * with an orphan clock, and how we deal with the transition to a valid
1444 * parent.
1445 */
1446static struct kunit_suite
1447clk_orphan_two_level_root_last_test_suite = {
1448 .name = "clk-orphan-two-level-root-last-test",
1449 .init = clk_orphan_two_level_root_last_test_init,
1450 .exit = clk_orphan_two_level_root_last_test_exit,
1451 .test_cases = clk_orphan_two_level_root_last_test_cases,
1452};
1453
1454/*
1455 * Test that clk_set_rate_range won't return an error for a valid range
1456 * and that it will make sure the rate of the clock is within the
1457 * boundaries.
1458 */
1459static void clk_range_test_set_range(struct kunit *test)
1460{
1461 struct clk_dummy_context *ctx = test->priv;
1462 struct clk_hw *hw = &ctx->hw;
1463 struct clk *clk = clk_hw_get_clk(hw, NULL);
1464 unsigned long rate;
1465
1466 KUNIT_ASSERT_EQ(test,
1467 clk_set_rate_range(clk,
1468 DUMMY_CLOCK_RATE_1,
1469 DUMMY_CLOCK_RATE_2),
1470 0);
1471
1472 rate = clk_get_rate(clk);
1473 KUNIT_ASSERT_GT(test, rate, 0);
1474 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1475 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1476
1477 clk_put(clk);
1478}
1479
1480/*
1481 * Test that calling clk_set_rate_range with a minimum rate higher than
1482 * the maximum rate returns an error.
1483 */
1484static void clk_range_test_set_range_invalid(struct kunit *test)
1485{
1486 struct clk_dummy_context *ctx = test->priv;
1487 struct clk_hw *hw = &ctx->hw;
1488 struct clk *clk = clk_hw_get_clk(hw, NULL);
1489
1490 KUNIT_EXPECT_LT(test,
1491 clk_set_rate_range(clk,
1492 DUMMY_CLOCK_RATE_1 + 1000,
1493 DUMMY_CLOCK_RATE_1),
1494 0);
1495
1496 clk_put(clk);
1497}
1498
1499/*
1500 * Test that users can't set multiple disjoint ranges that would be
1501 * impossible to meet.
1502 */
1503static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1504{
1505 struct clk_dummy_context *ctx = test->priv;
1506 struct clk_hw *hw = &ctx->hw;
1507 struct clk *user1, *user2;
1508
1509 user1 = clk_hw_get_clk(hw, NULL);
1510 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1511
1512 user2 = clk_hw_get_clk(hw, NULL);
1513 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1514
1515 KUNIT_ASSERT_EQ(test,
1516 clk_set_rate_range(user1, 1000, 2000),
1517 0);
1518
1519 KUNIT_EXPECT_LT(test,
1520 clk_set_rate_range(user2, 3000, 4000),
1521 0);
1522
1523 clk_put(user2);
1524 clk_put(user1);
1525}
1526
1527/*
1528 * Test that if our clock has some boundaries and we try to round a rate
1529 * lower than the minimum, the returned rate will be within range.
1530 */
1531static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1532{
1533 struct clk_dummy_context *ctx = test->priv;
1534 struct clk_hw *hw = &ctx->hw;
1535 struct clk *clk = clk_hw_get_clk(hw, NULL);
1536 long rate;
1537
1538 KUNIT_ASSERT_EQ(test,
1539 clk_set_rate_range(clk,
1540 DUMMY_CLOCK_RATE_1,
1541 DUMMY_CLOCK_RATE_2),
1542 0);
1543
1544 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1545 KUNIT_ASSERT_GT(test, rate, 0);
1546 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1547 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1548
1549 clk_put(clk);
1550}
1551
1552/*
1553 * Test that if our clock has some boundaries and we try to set a rate
1554 * lower than the minimum, the new rate will be within range.
1555 */
1556static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1557{
1558 struct clk_dummy_context *ctx = test->priv;
1559 struct clk_hw *hw = &ctx->hw;
1560 struct clk *clk = clk_hw_get_clk(hw, NULL);
1561 unsigned long rate;
1562
1563 KUNIT_ASSERT_EQ(test,
1564 clk_set_rate_range(clk,
1565 DUMMY_CLOCK_RATE_1,
1566 DUMMY_CLOCK_RATE_2),
1567 0);
1568
1569 KUNIT_ASSERT_EQ(test,
1570 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1571 0);
1572
1573 rate = clk_get_rate(clk);
1574 KUNIT_ASSERT_GT(test, rate, 0);
1575 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1576 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1577
1578 clk_put(clk);
1579}
1580
1581/*
1582 * Test that if our clock has some boundaries and we try to round and
1583 * set a rate lower than the minimum, the rate returned by
1584 * clk_round_rate() will be consistent with the new rate set by
1585 * clk_set_rate().
1586 */
1587static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1588{
1589 struct clk_dummy_context *ctx = test->priv;
1590 struct clk_hw *hw = &ctx->hw;
1591 struct clk *clk = clk_hw_get_clk(hw, NULL);
1592 long rounded;
1593
1594 KUNIT_ASSERT_EQ(test,
1595 clk_set_rate_range(clk,
1596 DUMMY_CLOCK_RATE_1,
1597 DUMMY_CLOCK_RATE_2),
1598 0);
1599
1600 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1601 KUNIT_ASSERT_GT(test, rounded, 0);
1602
1603 KUNIT_ASSERT_EQ(test,
1604 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1605 0);
1606
1607 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1608
1609 clk_put(clk);
1610}
1611
1612/*
1613 * Test that if our clock has some boundaries and we try to round a rate
1614 * higher than the maximum, the returned rate will be within range.
1615 */
1616static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1617{
1618 struct clk_dummy_context *ctx = test->priv;
1619 struct clk_hw *hw = &ctx->hw;
1620 struct clk *clk = clk_hw_get_clk(hw, NULL);
1621 long rate;
1622
1623 KUNIT_ASSERT_EQ(test,
1624 clk_set_rate_range(clk,
1625 DUMMY_CLOCK_RATE_1,
1626 DUMMY_CLOCK_RATE_2),
1627 0);
1628
1629 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1630 KUNIT_ASSERT_GT(test, rate, 0);
1631 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1632 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1633
1634 clk_put(clk);
1635}
1636
1637/*
1638 * Test that if our clock has some boundaries and we try to set a rate
1639 * higher than the maximum, the new rate will be within range.
1640 */
1641static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1642{
1643 struct clk_dummy_context *ctx = test->priv;
1644 struct clk_hw *hw = &ctx->hw;
1645 struct clk *clk = clk_hw_get_clk(hw, NULL);
1646 unsigned long rate;
1647
1648 KUNIT_ASSERT_EQ(test,
1649 clk_set_rate_range(clk,
1650 DUMMY_CLOCK_RATE_1,
1651 DUMMY_CLOCK_RATE_2),
1652 0);
1653
1654 KUNIT_ASSERT_EQ(test,
1655 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1656 0);
1657
1658 rate = clk_get_rate(clk);
1659 KUNIT_ASSERT_GT(test, rate, 0);
1660 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1661 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1662
1663 clk_put(clk);
1664}
1665
1666/*
1667 * Test that if our clock has some boundaries and we try to round and
1668 * set a rate higher than the maximum, the rate returned by
1669 * clk_round_rate() will be consistent with the new rate set by
1670 * clk_set_rate().
1671 */
1672static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1673{
1674 struct clk_dummy_context *ctx = test->priv;
1675 struct clk_hw *hw = &ctx->hw;
1676 struct clk *clk = clk_hw_get_clk(hw, NULL);
1677 long rounded;
1678
1679 KUNIT_ASSERT_EQ(test,
1680 clk_set_rate_range(clk,
1681 DUMMY_CLOCK_RATE_1,
1682 DUMMY_CLOCK_RATE_2),
1683 0);
1684
1685 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1686 KUNIT_ASSERT_GT(test, rounded, 0);
1687
1688 KUNIT_ASSERT_EQ(test,
1689 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1690 0);
1691
1692 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1693
1694 clk_put(clk);
1695}
1696
1697/*
1698 * Test that if our clock has a rate lower than the minimum set by a
1699 * call to clk_set_rate_range(), the rate will be raised to match the
1700 * new minimum.
1701 *
1702 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1703 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1704 */
1705static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1706{
1707 struct clk_dummy_context *ctx = test->priv;
1708 struct clk_hw *hw = &ctx->hw;
1709 struct clk *clk = clk_hw_get_clk(hw, NULL);
1710 unsigned long rate;
1711
1712 KUNIT_ASSERT_EQ(test,
1713 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1714 0);
1715
1716 KUNIT_ASSERT_EQ(test,
1717 clk_set_rate_range(clk,
1718 DUMMY_CLOCK_RATE_1,
1719 DUMMY_CLOCK_RATE_2),
1720 0);
1721
1722 rate = clk_get_rate(clk);
1723 KUNIT_ASSERT_GT(test, rate, 0);
1724 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1725
1726 clk_put(clk);
1727}
1728
1729/*
1730 * Test that if our clock has a rate higher than the maximum set by a
1731 * call to clk_set_rate_range(), the rate will be lowered to match the
1732 * new maximum.
1733 *
1734 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1735 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1736 */
1737static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1738{
1739 struct clk_dummy_context *ctx = test->priv;
1740 struct clk_hw *hw = &ctx->hw;
1741 struct clk *clk = clk_hw_get_clk(hw, NULL);
1742 unsigned long rate;
1743
1744 KUNIT_ASSERT_EQ(test,
1745 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1746 0);
1747
1748 KUNIT_ASSERT_EQ(test,
1749 clk_set_rate_range(clk,
1750 DUMMY_CLOCK_RATE_1,
1751 DUMMY_CLOCK_RATE_2),
1752 0);
1753
1754 rate = clk_get_rate(clk);
1755 KUNIT_ASSERT_GT(test, rate, 0);
1756 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1757
1758 clk_put(clk);
1759}
1760
1761static struct kunit_case clk_range_test_cases[] = {
1762 KUNIT_CASE(clk_range_test_set_range),
1763 KUNIT_CASE(clk_range_test_set_range_invalid),
1764 KUNIT_CASE(clk_range_test_multiple_disjoints_range),
1765 KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
1766 KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
1767 KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
1768 KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
1769 KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
1770 KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
1771 KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
1772 KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
1773 {}
1774};
1775
1776/*
1777 * Test suite for a basic rate clock, without any parent.
1778 *
1779 * These tests exercise the rate range API: clk_set_rate_range(),
1780 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1781 */
1782static struct kunit_suite clk_range_test_suite = {
1783 .name = "clk-range-test",
1784 .init = clk_test_init,
1785 .exit = clk_test_exit,
1786 .test_cases = clk_range_test_cases,
1787};
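/*
 * For reference, a consumer-side sketch of the rate range API exercised
 * by the suite above (hypothetical device code, not part of this test):
 *
 *	struct clk *clk = devm_clk_get(dev, NULL);
 *
 *	ret = clk_set_rate_range(clk, min_hz, max_hz);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_drop_range(clk);	// back to the default 0..ULONG_MAX range
 */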
1788
1789/*
1790 * Test that if we have several subsequent calls to
1791 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1792 * needed each and every time.
1793 *
1794 * With clk_dummy_maximize_rate_ops, this means that the rate will
1795 * trail along the maximum as it evolves.
1796 */
1797static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1798{
1799 struct clk_dummy_context *ctx = test->priv;
1800 struct clk_hw *hw = &ctx->hw;
1801 struct clk *clk = clk_hw_get_clk(hw, NULL);
1802 unsigned long rate;
1803
1804 KUNIT_ASSERT_EQ(test,
1805 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1806 0);
1807
1808 KUNIT_ASSERT_EQ(test,
1809 clk_set_rate_range(clk,
1810 DUMMY_CLOCK_RATE_1,
1811 DUMMY_CLOCK_RATE_2),
1812 0);
1813
1814 rate = clk_get_rate(clk);
1815 KUNIT_ASSERT_GT(test, rate, 0);
1816 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1817
1818 KUNIT_ASSERT_EQ(test,
1819 clk_set_rate_range(clk,
1820 DUMMY_CLOCK_RATE_1,
1821 DUMMY_CLOCK_RATE_2 - 1000),
1822 0);
1823
1824 rate = clk_get_rate(clk);
1825 KUNIT_ASSERT_GT(test, rate, 0);
1826 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1827
1828 KUNIT_ASSERT_EQ(test,
1829 clk_set_rate_range(clk,
1830 DUMMY_CLOCK_RATE_1,
1831 DUMMY_CLOCK_RATE_2),
1832 0);
1833
1834 rate = clk_get_rate(clk);
1835 KUNIT_ASSERT_GT(test, rate, 0);
1836 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1837
1838 clk_put(clk);
1839}
1840
1841/*
1842 * Test that if we have several subsequent calls to
1843 * clk_set_rate_range(), across multiple users, the core will reevaluate
1844 * whether a new rate is needed each and every time.
1845 *
1846 * With clk_dummy_maximize_rate_ops, this means that the rate will
1847 * trail along the maximum as it evolves.
1848 */
1849static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1850{
1851 struct clk_dummy_context *ctx = test->priv;
1852 struct clk_hw *hw = &ctx->hw;
1853 struct clk *clk = clk_hw_get_clk(hw, NULL);
1854 struct clk *user1, *user2;
1855 unsigned long rate;
1856
1857 user1 = clk_hw_get_clk(hw, NULL);
1858 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1859
1860 user2 = clk_hw_get_clk(hw, NULL);
1861 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1862
1863 KUNIT_ASSERT_EQ(test,
1864 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1865 0);
1866
1867 KUNIT_ASSERT_EQ(test,
1868 clk_set_rate_range(user1,
1869 0,
1870 DUMMY_CLOCK_RATE_2),
1871 0);
1872
1873 rate = clk_get_rate(clk);
1874 KUNIT_ASSERT_GT(test, rate, 0);
1875 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1876
1877 KUNIT_ASSERT_EQ(test,
1878 clk_set_rate_range(user2,
1879 0,
1880 DUMMY_CLOCK_RATE_1),
1881 0);
1882
1883 rate = clk_get_rate(clk);
1884 KUNIT_ASSERT_GT(test, rate, 0);
1885 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1886
1887 KUNIT_ASSERT_EQ(test,
1888 clk_drop_range(user2),
1889 0);
1890
1891 rate = clk_get_rate(clk);
1892 KUNIT_ASSERT_GT(test, rate, 0);
1893 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1894
1895 clk_put(user2);
1896 clk_put(user1);
1897 clk_put(clk);
1898}
1899
1900/*
1901 * Test that if we have several subsequent calls to
1902 * clk_set_rate_range(), across multiple users, the core will reevaluate
1903 * whether a new rate is needed, including when a user drops its clock.
1904 *
1905 * With clk_dummy_maximize_rate_ops, this means that the rate will
1906 * trail along the maximum as it evolves.
1907 */
1908static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1909{
1910 struct clk_dummy_context *ctx = test->priv;
1911 struct clk_hw *hw = &ctx->hw;
1912 struct clk *clk = clk_hw_get_clk(hw, NULL);
1913 struct clk *user1, *user2;
1914 unsigned long rate;
1915
1916 user1 = clk_hw_get_clk(hw, NULL);
1917 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1918
1919 user2 = clk_hw_get_clk(hw, NULL);
1920 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1921
1922 KUNIT_ASSERT_EQ(test,
1923 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1924 0);
1925
1926 KUNIT_ASSERT_EQ(test,
1927 clk_set_rate_range(user1,
1928 0,
1929 DUMMY_CLOCK_RATE_2),
1930 0);
1931
1932 rate = clk_get_rate(clk);
1933 KUNIT_ASSERT_GT(test, rate, 0);
1934 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1935
1936 KUNIT_ASSERT_EQ(test,
1937 clk_set_rate_range(user2,
1938 0,
1939 DUMMY_CLOCK_RATE_1),
1940 0);
1941
1942 rate = clk_get_rate(clk);
1943 KUNIT_ASSERT_GT(test, rate, 0);
1944 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1945
1946 clk_put(user2);
1947
1948 rate = clk_get_rate(clk);
1949 KUNIT_ASSERT_GT(test, rate, 0);
1950 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1951
1952 clk_put(user1);
1953 clk_put(clk);
1954}
1955
1956static struct kunit_case clk_range_maximize_test_cases[] = {
1957 KUNIT_CASE(clk_range_test_set_range_rate_maximized),
1958 KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
1959 KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
1960 {}
1961};
1962
1963/*
1964 * Test suite for a basic rate clock, without any parent.
1965 *
1966 * These tests exercise the rate range API: clk_set_rate_range(),
1967 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1968 * driver that will always try to run at the highest possible rate.
1969 */
1970static struct kunit_suite clk_range_maximize_test_suite = {
1971 .name = "clk-range-maximize-test",
1972 .init = clk_maximize_test_init,
1973 .exit = clk_test_exit,
1974 .test_cases = clk_range_maximize_test_cases,
1975};
1976
1977/*
1978 * Test that if we have several subsequent calls to
1979 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1980 * needed each and every time.
1981 *
1982 * With clk_dummy_minimize_rate_ops, this means that the rate will
1983 * trail along the minimum as it evolves.
1984 */
1985static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1986{
1987 struct clk_dummy_context *ctx = test->priv;
1988 struct clk_hw *hw = &ctx->hw;
1989 struct clk *clk = clk_hw_get_clk(hw, NULL);
1990 unsigned long rate;
1991
1992 KUNIT_ASSERT_EQ(test,
1993 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1994 0);
1995
1996 KUNIT_ASSERT_EQ(test,
1997 clk_set_rate_range(clk,
1998 DUMMY_CLOCK_RATE_1,
1999 DUMMY_CLOCK_RATE_2),
2000 0);
2001
2002 rate = clk_get_rate(clk);
2003 KUNIT_ASSERT_GT(test, rate, 0);
2004 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2005
2006 KUNIT_ASSERT_EQ(test,
2007 clk_set_rate_range(clk,
2008 DUMMY_CLOCK_RATE_1 + 1000,
2009 DUMMY_CLOCK_RATE_2),
2010 0);
2011
2012 rate = clk_get_rate(clk);
2013 KUNIT_ASSERT_GT(test, rate, 0);
2014 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
2015
2016 KUNIT_ASSERT_EQ(test,
2017 clk_set_rate_range(clk,
2018 DUMMY_CLOCK_RATE_1,
2019 DUMMY_CLOCK_RATE_2),
2020 0);
2021
2022 rate = clk_get_rate(clk);
2023 KUNIT_ASSERT_GT(test, rate, 0);
2024 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2025
2026 clk_put(clk);
2027}
2028
/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed each and every time.
 *
 * With clk_dummy_minimize_rate_ops, this means that the rate will
 * trail along the minimum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   DUMMY_CLOCK_RATE_1,
					   ULONG_MAX),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   DUMMY_CLOCK_RATE_2,
					   ULONG_MAX),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
			clk_drop_range(user2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(user2);
	clk_put(user1);
	clk_put(clk);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
 *
 * With clk_dummy_minimize_rate_ops, this means that the rate will
 * trail along the minimum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   DUMMY_CLOCK_RATE_1,
					   ULONG_MAX),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   DUMMY_CLOCK_RATE_2,
					   ULONG_MAX),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(user2);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(user1);
	clk_put(clk);
}

static struct kunit_case clk_range_minimize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
	{}
};

/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate range API: clk_set_rate_range(),
 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
 * driver that will always try to run at the lowest possible rate.
 */
static struct kunit_suite clk_range_minimize_test_suite = {
	.name = "clk-range-minimize-test",
	.init = clk_minimize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_minimize_test_cases,
};

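/*
 * Context for the leaf-mux tests: a two-parent mux, a pass-through
 * parent clock and a leaf clock built on top of it. The rate request
 * forwarded by the leaf is stored in @req so the test can inspect it,
 * using the determine_rate helper selected through @determine_rate_func.
 */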
struct clk_leaf_mux_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk_hw hw;
	struct clk_hw parent;
	struct clk_rate_request *req;
	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
};

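/*
 * Forward the rate request to the parent using the determine_rate
 * helper under test, then copy the resulting rate back into the leaf
 * request. The forwarded request is kept in ctx->req so the test can
 * inspect it afterwards.
 */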
static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw);
	int ret;
	struct clk_rate_request *parent_req = ctx->req;

	clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate);
	ret = ctx->determine_rate_func(req->best_parent_hw, parent_req);
	if (ret)
		return ret;

	req->rate = parent_req->rate;

	return 0;
}

static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = {
	.determine_rate = clk_leaf_mux_determine_rate,
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};

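/*
 * Register the hierarchy used by the leaf-mux tests:
 * parent-0/parent-1 -> test-mux -> test-parent (pass-through) -> test-clock,
 * with CLK_SET_RATE_PARENT set on test-parent and test-clock.
 */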
static int
clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx;
	const char *top_parents[2] = { "parent-0", "parent-1" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.current_parent = 0;
	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
						   &clk_multiple_parents_mux_ops,
						   0);
	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
	if (ret)
		return ret;

	ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw,
					  &empty_clk_ops, CLK_SET_RATE_PARENT);
	ret = clk_hw_register(NULL, &ctx->parent);
	if (ret)
		return ret;

	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent,
				      &clk_leaf_mux_set_rate_parent_ops,
				      CLK_SET_RATE_PARENT);
	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}

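/* Unregister the clocks in the reverse order of their registration. */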
static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent);
	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}

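/* One parametrized case per generic determine_rate helper under test. */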
struct clk_leaf_mux_set_rate_parent_determine_rate_test_case {
	const char *desc;
	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
};

static void
clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc(
		const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc)
{
	strcpy(desc, t->desc);
}

static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case
clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = {
	{
		/*
		 * Test that __clk_determine_rate() on the parent that can't
		 * change rate doesn't return a clk_rate_request structure with
		 * the best_parent_hw pointer pointing to the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent",
		.determine_rate_func = __clk_determine_rate,
	},
	{
		/*
		 * Test that __clk_mux_determine_rate() on the parent that
		 * can't change rate doesn't return a clk_rate_request
		 * structure with the best_parent_hw pointer pointing to
		 * the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent",
		.determine_rate_func = __clk_mux_determine_rate,
	},
	{
		/*
		 * Test that __clk_mux_determine_rate_closest() on the parent
		 * that can't change rate doesn't return a clk_rate_request
		 * structure with the best_parent_hw pointer pointing to
		 * the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent",
		.determine_rate_func = __clk_mux_determine_rate_closest,
	},
	{
		/*
		 * Test that clk_hw_determine_rate_no_reparent() on the parent
		 * that can't change rate doesn't return a clk_rate_request
		 * structure with the best_parent_hw pointer pointing to
		 * the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent",
		.determine_rate_func = clk_hw_determine_rate_no_reparent,
	},
};

KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
		  clk_leaf_mux_set_rate_parent_determine_rate_test_cases,
		  clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc)

/*
 * Test that when a clk that can't change rate itself calls a function like
 * __clk_determine_rate() on its parent, it doesn't get back a
 * clk_rate_request structure that has the best_parent_hw pointer pointing
 * to the clk_hw passed into the determine_rate function. See commit
 * 262ca38f4b6e ("clk: Stop forwarding clk_rate_requests to the parent")
 * for more background.
 */
static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk_rate_request req;
	unsigned long rate;
	const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param;

	test_param = test->param_value;
	ctx->determine_rate_func = test_param->determine_rate_func;

	ctx->req = &req;
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2));

	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);

	clk_put(clk);
}

static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
	KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
			 clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params),
	{}
};

/*
 * Test suite for a clock whose parent is a pass-through clk whose parent is a
 * mux with multiple parents. The leaf and pass-through clocks have the
 * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which
 * will then select which parent is the best fit for a given rate.
 *
 * These tests exercise the behaviour of muxes, and the proper selection
 * of parents.
 */
static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
	.name = "clk-leaf-mux-set-rate-parent",
	.init = clk_leaf_mux_set_rate_parent_test_init,
	.exit = clk_leaf_mux_set_rate_parent_test_exit,
	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
};

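/*
 * Snapshot of a single rate-change notification, plus a waitqueue the
 * test can block on until the notifier callback has fired.
 */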
struct clk_mux_notifier_rate_change {
	bool done;
	unsigned long old_rate;
	unsigned long new_rate;
	wait_queue_head_t wq;
};

struct clk_mux_notifier_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk *clk;
	struct notifier_block clk_nb;
	struct clk_mux_notifier_rate_change pre_rate_change;
	struct clk_mux_notifier_rate_change post_rate_change;
};

#define NOTIFIER_TIMEOUT_MS 100

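/*
 * Record the old and new rates reported for PRE_RATE_CHANGE and
 * POST_RATE_CHANGE notifications, and wake up the test waiting on the
 * corresponding waitqueue.
 */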
static int clk_mux_notifier_callback(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct clk_notifier_data *clk_data = data;
	struct clk_mux_notifier_ctx *ctx = container_of(nb,
							struct clk_mux_notifier_ctx,
							clk_nb);

	if (action & PRE_RATE_CHANGE) {
		ctx->pre_rate_change.old_rate = clk_data->old_rate;
		ctx->pre_rate_change.new_rate = clk_data->new_rate;
		ctx->pre_rate_change.done = true;
		wake_up_interruptible(&ctx->pre_rate_change.wq);
	}

	if (action & POST_RATE_CHANGE) {
		ctx->post_rate_change.old_rate = clk_data->old_rate;
		ctx->post_rate_change.new_rate = clk_data->new_rate;
		ctx->post_rate_change.done = true;
		wake_up_interruptible(&ctx->post_rate_change.wq);
	}

	return 0;
}

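/*
 * Register a mux with two parents running at different rates, and a
 * rate-change notifier on the mux clock itself.
 */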
static int clk_mux_notifier_test_init(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx;
	const char *top_parents[2] = { "parent-0", "parent-1" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;
	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
	init_waitqueue_head(&ctx->pre_rate_change.wq);
	init_waitqueue_head(&ctx->post_rate_change.wq);

	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.current_parent = 0;
	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
						   &clk_multiple_parents_mux_ops,
						   0);
	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
	if (ret)
		return ret;

	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
	if (ret)
		return ret;

	return 0;
}

static void clk_mux_notifier_test_exit(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx = test->priv;
	struct clk *clk = ctx->clk;

	clk_notifier_unregister(clk, &ctx->clk_nb);
	clk_put(clk);

	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}

/*
 * Test that if we have a notifier registered on a mux, the core will
 * notify us when we switch to another parent, and with the proper old
 * and new rates.
 */
static void clk_mux_notifier_set_parent_test(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->mux_ctx.hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
	int ret;

	ret = clk_set_parent(clk, new_parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
					       ctx->pre_rate_change.done,
					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
	KUNIT_ASSERT_GT(test, ret, 0);

	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);

	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
					       ctx->post_rate_change.done,
					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
	KUNIT_ASSERT_GT(test, ret, 0);

	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);

	clk_put(new_parent);
	clk_put(clk);
}

static struct kunit_case clk_mux_notifier_test_cases[] = {
	KUNIT_CASE(clk_mux_notifier_set_parent_test),
	{}
};

/*
 * Test suite for a mux with multiple parents, and a notifier registered
 * on the mux.
 *
 * These tests exercise the behaviour of notifiers.
 */
static struct kunit_suite clk_mux_notifier_test_suite = {
	.name = "clk-mux-notifier",
	.init = clk_mux_notifier_test_init,
	.exit = clk_mux_notifier_test_exit,
	.test_cases = clk_mux_notifier_test_cases,
};

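/*
 * Register a mux with two parents running at different rates, using
 * mux ops that are not allowed to reparent the clock.
 */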
static int
clk_mux_no_reparent_test_init(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx;
	const char *parents[2] = { "parent-0", "parent-1" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
							    &clk_dummy_rate_ops,
							    0);
	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
							    &clk_dummy_rate_ops,
							    0);
	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
	if (ret)
		return ret;

	ctx->current_parent = 0;
	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
					   &clk_multiple_parents_no_reparent_mux_ops,
					   0);
	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}

static void
clk_mux_no_reparent_test_exit(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parents_ctx[0].hw);
	clk_hw_unregister(&ctx->parents_ctx[1].hw);
}

/*
 * Test that if we have a mux that cannot change parent and we call
 * clk_round_rate() on it with a rate that should cause it to change
 * parent, it won't.
 */
static void clk_mux_no_reparent_round_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *other_parent, *parent;
	unsigned long other_parent_rate;
	unsigned long parent_rate;
	long rounded_rate;

	parent = clk_get_parent(clk);
	KUNIT_ASSERT_PTR_NE(test, parent, NULL);

	parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, parent_rate, 0);

	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));

	other_parent_rate = clk_get_rate(other_parent);
	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
	clk_put(other_parent);

	rounded_rate = clk_round_rate(clk, other_parent_rate);
	KUNIT_ASSERT_GT(test, rounded_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);

	clk_put(clk);
}

/*
 * Test that if we have a mux that cannot change parent and we call
 * clk_set_rate() on it with a rate that should cause it to change
 * parent, it won't.
 */
static void clk_mux_no_reparent_set_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *other_parent, *parent;
	unsigned long other_parent_rate;
	unsigned long parent_rate;
	unsigned long rate;
	int ret;

	parent = clk_get_parent(clk);
	KUNIT_ASSERT_PTR_NE(test, parent, NULL);

	parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, parent_rate, 0);

	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));

	other_parent_rate = clk_get_rate(other_parent);
	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
	clk_put(other_parent);

	ret = clk_set_rate(clk, other_parent_rate);
	KUNIT_ASSERT_EQ(test, ret, 0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, parent_rate);

	clk_put(clk);
}

static struct kunit_case clk_mux_no_reparent_test_cases[] = {
	KUNIT_CASE(clk_mux_no_reparent_round_rate),
	KUNIT_CASE(clk_mux_no_reparent_set_rate),
	{}
};

/*
 * Test suite for a clock mux that isn't allowed to change parent, using
 * the clk_hw_determine_rate_no_reparent() helper.
 *
 * These tests exercise that helper, and the proper selection of
 * rates and parents.
 */
static struct kunit_suite clk_mux_no_reparent_test_suite = {
	.name = "clk-mux-no-reparent",
	.init = clk_mux_no_reparent_test_init,
	.exit = clk_mux_no_reparent_test_exit,
	.test_cases = clk_mux_no_reparent_test_cases,
};

kunit_test_suites(
	&clk_leaf_mux_set_rate_parent_test_suite,
	&clk_test_suite,
	&clk_multiple_parents_mux_test_suite,
	&clk_mux_no_reparent_test_suite,
	&clk_mux_notifier_test_suite,
	&clk_orphan_transparent_multiple_parent_mux_test_suite,
	&clk_orphan_transparent_single_parent_test_suite,
	&clk_orphan_two_level_root_last_test_suite,
	&clk_range_test_suite,
	&clk_range_maximize_test_suite,
	&clk_range_minimize_test_suite,
	&clk_single_parent_mux_test_suite,
	&clk_uncached_test_suite
);
MODULE_LICENSE("GPL v2");