   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * KUnit test for clk rate management
   4 */
   5#include <linux/clk.h>
   6#include <linux/clk-provider.h>
   7
   8/* Needed for clk_hw_get_clk() */
   9#include "clk.h"
  10
  11#include <kunit/test.h>
  12
  13#define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
  14#define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
  15#define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)
  16
  17struct clk_dummy_context {
  18	struct clk_hw hw;
  19	unsigned long rate;
  20};
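/*
 * A minimal, hardware-less clock: the ops below recover this context
 * from the embedded clk_hw with container_of() and simply store or
 * report ->rate instead of touching any real hardware.
 */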
  21
  22static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
  23					   unsigned long parent_rate)
  24{
  25	struct clk_dummy_context *ctx =
  26		container_of(hw, struct clk_dummy_context, hw);
  27
  28	return ctx->rate;
  29}
  30
  31static int clk_dummy_determine_rate(struct clk_hw *hw,
  32				    struct clk_rate_request *req)
  33{
  34	/* Just return the same rate without modifying it */
  35	return 0;
  36}
  37
  38static int clk_dummy_maximize_rate(struct clk_hw *hw,
  39				   struct clk_rate_request *req)
  40{
  41	/*
  42	 * If there's a maximum set, always run the clock at the maximum
  43	 * allowed.
  44	 */
  45	if (req->max_rate < ULONG_MAX)
  46		req->rate = req->max_rate;
  47
  48	return 0;
  49}
  50
  51static int clk_dummy_minimize_rate(struct clk_hw *hw,
  52				   struct clk_rate_request *req)
  53{
  54	/*
  55	 * If there's a minimum set, always run the clock at the minimum
  56	 * allowed.
  57	 */
  58	if (req->min_rate > 0)
  59		req->rate = req->min_rate;
  60
  61	return 0;
  62}
  63
  64static int clk_dummy_set_rate(struct clk_hw *hw,
  65			      unsigned long rate,
  66			      unsigned long parent_rate)
  67{
  68	struct clk_dummy_context *ctx =
  69		container_of(hw, struct clk_dummy_context, hw);
  70
  71	ctx->rate = rate;
  72	return 0;
  73}
  74
  75static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
  76{
  77	if (index >= clk_hw_get_num_parents(hw))
  78		return -EINVAL;
  79
  80	return 0;
  81}
  82
  83static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
  84{
  85	return 0;
  86}
  87
  88static const struct clk_ops clk_dummy_rate_ops = {
  89	.recalc_rate = clk_dummy_recalc_rate,
  90	.determine_rate = clk_dummy_determine_rate,
  91	.set_rate = clk_dummy_set_rate,
  92};
  93
  94static const struct clk_ops clk_dummy_maximize_rate_ops = {
  95	.recalc_rate = clk_dummy_recalc_rate,
  96	.determine_rate = clk_dummy_maximize_rate,
  97	.set_rate = clk_dummy_set_rate,
  98};
  99
 100static const struct clk_ops clk_dummy_minimize_rate_ops = {
 101	.recalc_rate = clk_dummy_recalc_rate,
 102	.determine_rate = clk_dummy_minimize_rate,
 103	.set_rate = clk_dummy_set_rate,
 104};
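/*
 * Three flavours of the same dummy rate clock: the plain ops accept the
 * requested rate unchanged, while the maximize/minimize variants clamp
 * the request to req->max_rate or req->min_rate. The clock core fills
 * those bounds from the aggregated consumer ranges before calling
 * .determine_rate, which is what the range tests below rely on.
 */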
 105
 106static const struct clk_ops clk_dummy_single_parent_ops = {
 107	.set_parent = clk_dummy_single_set_parent,
 108	.get_parent = clk_dummy_single_get_parent,
 109};
 110
 111struct clk_multiple_parent_ctx {
 112	struct clk_dummy_context parents_ctx[2];
 113	struct clk_hw hw;
 114	u8 current_parent;
 115};
 116
 117static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
 118{
 119	struct clk_multiple_parent_ctx *ctx =
 120		container_of(hw, struct clk_multiple_parent_ctx, hw);
 121
 122	if (index >= clk_hw_get_num_parents(hw))
 123		return -EINVAL;
 124
 125	ctx->current_parent = index;
 126
 127	return 0;
 128}
 129
 130static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
 131{
 132	struct clk_multiple_parent_ctx *ctx =
 133		container_of(hw, struct clk_multiple_parent_ctx, hw);
 134
 135	return ctx->current_parent;
 136}
 137
 138static const struct clk_ops clk_multiple_parents_mux_ops = {
 139	.get_parent = clk_multiple_parents_mux_get_parent,
 140	.set_parent = clk_multiple_parents_mux_set_parent,
 141	.determine_rate = __clk_mux_determine_rate_closest,
 142};
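/*
 * __clk_mux_determine_rate_closest() is the generic mux helper that
 * picks the parent able to provide the rate closest to the request, so
 * this mux can be reparented by rate requests as well as by explicit
 * clk_set_parent() calls.
 */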
 143
 144static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
 145{
 146	struct clk_dummy_context *ctx;
 147	struct clk_init_data init = { };
 148	int ret;
 149
 150	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 151	if (!ctx)
 152		return -ENOMEM;
 153	ctx->rate = DUMMY_CLOCK_INIT_RATE;
 154	test->priv = ctx;
 155
 156	init.name = "test_dummy_rate";
 157	init.ops = ops;
 158	ctx->hw.init = &init;
 159
 160	ret = clk_hw_register(NULL, &ctx->hw);
 161	if (ret)
 162		return ret;
 163
 164	return 0;
 165}
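/*
 * Note that 'init' can live on the stack: clk_hw_register() copies
 * everything it needs during registration. kunit_kzalloc() ties the
 * context's lifetime to the test, so no explicit kfree() is needed in
 * the exit callback.
 */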
 166
 167static int clk_test_init(struct kunit *test)
 168{
 169	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
 170}
 171
 172static int clk_maximize_test_init(struct kunit *test)
 173{
 174	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
 175}
 176
 177static int clk_minimize_test_init(struct kunit *test)
 178{
 179	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
 180}
 181
 182static void clk_test_exit(struct kunit *test)
 183{
 184	struct clk_dummy_context *ctx = test->priv;
 185
 186	clk_hw_unregister(&ctx->hw);
 187}
 188
 189/*
 190 * Test that the actual rate matches what is returned by clk_get_rate()
 191 */
 192static void clk_test_get_rate(struct kunit *test)
 193{
 194	struct clk_dummy_context *ctx = test->priv;
 195	struct clk_hw *hw = &ctx->hw;
 196	struct clk *clk = clk_hw_get_clk(hw, NULL);
 197	unsigned long rate;
 198
 199	rate = clk_get_rate(clk);
 200	KUNIT_ASSERT_GT(test, rate, 0);
 201	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
 202
 203	clk_put(clk);
 204}
 205
 206/*
 207 * Test that, after a call to clk_set_rate(), the rate returned by
 208 * clk_get_rate() matches.
 209 *
 210 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 211 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 212 */
 213static void clk_test_set_get_rate(struct kunit *test)
 214{
 215	struct clk_dummy_context *ctx = test->priv;
 216	struct clk_hw *hw = &ctx->hw;
 217	struct clk *clk = clk_hw_get_clk(hw, NULL);
 218	unsigned long rate;
 219
 220	KUNIT_ASSERT_EQ(test,
 221			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
 222			0);
 223
 224	rate = clk_get_rate(clk);
 225	KUNIT_ASSERT_GT(test, rate, 0);
 226	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
 227
 228	clk_put(clk);
 229}
 230
 231/*
 232 * Test that, after several calls to clk_set_rate(), the rate returned
 233 * by clk_get_rate() matches the last one.
 234 *
 235 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 236 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 237 */
 238static void clk_test_set_set_get_rate(struct kunit *test)
 239{
 240	struct clk_dummy_context *ctx = test->priv;
 241	struct clk_hw *hw = &ctx->hw;
 242	struct clk *clk = clk_hw_get_clk(hw, NULL);
 243	unsigned long rate;
 244
 245	KUNIT_ASSERT_EQ(test,
 246			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
 247			0);
 248
 249	KUNIT_ASSERT_EQ(test,
 250			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
 251			0);
 252
 253	rate = clk_get_rate(clk);
 254	KUNIT_ASSERT_GT(test, rate, 0);
 255	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
 256
 257	clk_put(clk);
 258}
 259
 260/*
 261 * Test that clk_round_rate() and clk_set_rate() are consistent and will
 262 * return the same frequency.
 263 */
 264static void clk_test_round_set_get_rate(struct kunit *test)
 265{
 266	struct clk_dummy_context *ctx = test->priv;
 267	struct clk_hw *hw = &ctx->hw;
 268	struct clk *clk = clk_hw_get_clk(hw, NULL);
 269	unsigned long rounded_rate, set_rate;
 270
 271	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
 272	KUNIT_ASSERT_GT(test, rounded_rate, 0);
 273	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
 274
 275	KUNIT_ASSERT_EQ(test,
 276			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
 277			0);
 278
 279	set_rate = clk_get_rate(clk);
 280	KUNIT_ASSERT_GT(test, set_rate, 0);
 281	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
 282
 283	clk_put(clk);
 284}
 285
 286static struct kunit_case clk_test_cases[] = {
 287	KUNIT_CASE(clk_test_get_rate),
 288	KUNIT_CASE(clk_test_set_get_rate),
 289	KUNIT_CASE(clk_test_set_set_get_rate),
 290	KUNIT_CASE(clk_test_round_set_get_rate),
 291	{}
 292};
 293
 294/*
 295 * Test suite for a basic rate clock, without any parent.
 296 *
 297 * These tests exercise the rate API with simple scenarios
 298 */
 299static struct kunit_suite clk_test_suite = {
 300	.name = "clk-test",
 301	.init = clk_test_init,
 302	.exit = clk_test_exit,
 303	.test_cases = clk_test_cases,
 304};
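/*
 * The suites in this file are run by the KUnit framework. One possible
 * invocation on UML, assuming CONFIG_CLK_KUNIT_TEST is enabled in the
 * chosen kunitconfig, is something like:
 *
 *   ./tools/testing/kunit/kunit.py run 'clk*'
 */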
 305
 306static int clk_uncached_test_init(struct kunit *test)
 307{
 308	struct clk_dummy_context *ctx;
 309	int ret;
 310
 311	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 312	if (!ctx)
 313		return -ENOMEM;
 314	test->priv = ctx;
 315
 316	ctx->rate = DUMMY_CLOCK_INIT_RATE;
 317	ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
 318					     &clk_dummy_rate_ops,
 319					     CLK_GET_RATE_NOCACHE);
 320
 321	ret = clk_hw_register(NULL, &ctx->hw);
 322	if (ret)
 323		return ret;
 324
 325	return 0;
 326}
 327
 328/*
 329 * Test that for an uncached clock, the clock framework doesn't cache
 330 * the rate and clk_get_rate() will return the underlying clock rate
 331 * even if it changed.
 332 */
 333static void clk_test_uncached_get_rate(struct kunit *test)
 334{
 335	struct clk_dummy_context *ctx = test->priv;
 336	struct clk_hw *hw = &ctx->hw;
 337	struct clk *clk = clk_hw_get_clk(hw, NULL);
 338	unsigned long rate;
 339
 340	rate = clk_get_rate(clk);
 341	KUNIT_ASSERT_GT(test, rate, 0);
 342	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
 343
 344	/* We change the rate behind the clock framework's back */
 345	ctx->rate = DUMMY_CLOCK_RATE_1;
 346	rate = clk_get_rate(clk);
 347	KUNIT_ASSERT_GT(test, rate, 0);
 348	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
 349
 350	clk_put(clk);
 351}
 352
 353/*
 354 * Test that for an uncached clock, clk_set_rate_range() will work
 355 * properly if the rate hasn't changed.
 356 */
 357static void clk_test_uncached_set_range(struct kunit *test)
 358{
 359	struct clk_dummy_context *ctx = test->priv;
 360	struct clk_hw *hw = &ctx->hw;
 361	struct clk *clk = clk_hw_get_clk(hw, NULL);
 362	unsigned long rate;
 363
 364	KUNIT_ASSERT_EQ(test,
 365			clk_set_rate_range(clk,
 366					   DUMMY_CLOCK_RATE_1,
 367					   DUMMY_CLOCK_RATE_2),
 368			0);
 369
 370	rate = clk_get_rate(clk);
 371	KUNIT_ASSERT_GT(test, rate, 0);
 372	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
 373	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
 374
 375	clk_put(clk);
 376}
 377
 378/*
 379 * Test that for an uncached clock, clk_set_rate_range() will work
 380 * properly if the rate has changed in hardware.
 381 *
 382 * In this case, it means that if the rate wasn't initially in the range
 383 * we're trying to set, but got changed at some point into the range
 384 * without the kernel knowing about it, its rate shouldn't be affected.
 385 */
 386static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
 387{
 388	struct clk_dummy_context *ctx = test->priv;
 389	struct clk_hw *hw = &ctx->hw;
 390	struct clk *clk = clk_hw_get_clk(hw, NULL);
 391	unsigned long rate;
 392
 393	/* We change the rate behind the clock framework's back */
 394	ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
 395	KUNIT_ASSERT_EQ(test,
 396			clk_set_rate_range(clk,
 397					   DUMMY_CLOCK_RATE_1,
 398					   DUMMY_CLOCK_RATE_2),
 399			0);
 400
 401	rate = clk_get_rate(clk);
 402	KUNIT_ASSERT_GT(test, rate, 0);
 403	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
 404
 405	clk_put(clk);
 406}
 407
 408static struct kunit_case clk_uncached_test_cases[] = {
 409	KUNIT_CASE(clk_test_uncached_get_rate),
 410	KUNIT_CASE(clk_test_uncached_set_range),
 411	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
 412	{}
 413};
 414
 415/*
 416 * Test suite for a basic, uncached, rate clock, without any parent.
 417 *
 418 * These tests exercise the rate API with simple scenarios
 419 */
 420static struct kunit_suite clk_uncached_test_suite = {
 421	.name = "clk-uncached-test",
 422	.init = clk_uncached_test_init,
 423	.exit = clk_test_exit,
 424	.test_cases = clk_uncached_test_cases,
 425};
 426
 427static int
 428clk_multiple_parents_mux_test_init(struct kunit *test)
 429{
 430	struct clk_multiple_parent_ctx *ctx;
 431	const char *parents[2] = { "parent-0", "parent-1"};
 432	int ret;
 433
 434	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 435	if (!ctx)
 436		return -ENOMEM;
 437	test->priv = ctx;
 438
 439	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
 440							    &clk_dummy_rate_ops,
 441							    0);
 442	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
 443	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
 444	if (ret)
 445		return ret;
 446
 447	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
 448							    &clk_dummy_rate_ops,
 449							    0);
 450	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
 451	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
 452	if (ret)
 453		return ret;
 454
 455	ctx->current_parent = 0;
 456	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
 457					   &clk_multiple_parents_mux_ops,
 458					   CLK_SET_RATE_PARENT);
 459	ret = clk_hw_register(NULL, &ctx->hw);
 460	if (ret)
 461		return ret;
 462
 463	return 0;
 464}
 465
 466static void
 467clk_multiple_parents_mux_test_exit(struct kunit *test)
 468{
 469	struct clk_multiple_parent_ctx *ctx = test->priv;
 470
 471	clk_hw_unregister(&ctx->hw);
 472	clk_hw_unregister(&ctx->parents_ctx[0].hw);
 473	clk_hw_unregister(&ctx->parents_ctx[1].hw);
 474}
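/*
 * Unregister the child mux before its parents so that no registered
 * clock is ever left pointing at an unregistered parent.
 */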
 475
 476/*
 477 * Test that for a clock with multiple parents, clk_get_parent()
 478 * actually returns the current one.
 479 */
 480static void
 481clk_test_multiple_parents_mux_get_parent(struct kunit *test)
 482{
 483	struct clk_multiple_parent_ctx *ctx = test->priv;
 484	struct clk_hw *hw = &ctx->hw;
 485	struct clk *clk = clk_hw_get_clk(hw, NULL);
 486	struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
 487
 488	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
 489
 490	clk_put(parent);
 491	clk_put(clk);
 492}
 493
 494/*
 495 * Test that for a clock with multiple parents, clk_has_parent()
 496 * actually reports all of them as parents.
 497 */
 498static void
 499clk_test_multiple_parents_mux_has_parent(struct kunit *test)
 500{
 501	struct clk_multiple_parent_ctx *ctx = test->priv;
 502	struct clk_hw *hw = &ctx->hw;
 503	struct clk *clk = clk_hw_get_clk(hw, NULL);
 504	struct clk *parent;
 505
 506	parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
 507	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
 508	clk_put(parent);
 509
 510	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 511	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
 512	clk_put(parent);
 513
 514	clk_put(clk);
 515}
 516
 517/*
 518 * Test that for a clock with multiple parents, if we set a range on
 519 * that clock and the parent is changed, its rate after the reparenting
 520 * is still within the range we asked for.
 521 *
 522 * FIXME: clk_set_parent() only does the reparenting but doesn't
 523 * reevaluate whether the new clock rate is within its boundaries or
 524 * not.
 525 */
 526static void
 527clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
 528{
 529	struct clk_multiple_parent_ctx *ctx = test->priv;
 530	struct clk_hw *hw = &ctx->hw;
 531	struct clk *clk = clk_hw_get_clk(hw, NULL);
 532	struct clk *parent1, *parent2;
 533	unsigned long rate;
 534	int ret;
 535
 536	kunit_skip(test, "This needs to be fixed in the core.");
 537
 538	parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
 539	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
 540	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
 541
 542	parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 543	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
 544
 545	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
 546	KUNIT_ASSERT_EQ(test, ret, 0);
 547
 548	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
 549	KUNIT_ASSERT_EQ(test, ret, 0);
 550
 551	ret = clk_set_rate_range(clk,
 552				 DUMMY_CLOCK_RATE_1 - 1000,
 553				 DUMMY_CLOCK_RATE_1 + 1000);
 554	KUNIT_ASSERT_EQ(test, ret, 0);
 555
 556	ret = clk_set_parent(clk, parent2);
 557	KUNIT_ASSERT_EQ(test, ret, 0);
 558
 559	rate = clk_get_rate(clk);
 560	KUNIT_ASSERT_GT(test, rate, 0);
 561	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
 562	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
 563
 564	clk_put(parent2);
 565	clk_put(parent1);
 566	clk_put(clk);
 567}
 568
 569static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
 570	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
 571	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
 572	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
 573	{}
 574};
 575
 576/*
 577 * Test suite for a basic mux clock with two parents, with
 578 * CLK_SET_RATE_PARENT on the child.
 579 *
 580 * These tests exercise the consumer API and check that the state of the
 581 * child and parents is sane and consistent.
 582 */
 583static struct kunit_suite
 584clk_multiple_parents_mux_test_suite = {
 585	.name = "clk-multiple-parents-mux-test",
 586	.init = clk_multiple_parents_mux_test_init,
 587	.exit = clk_multiple_parents_mux_test_exit,
 588	.test_cases = clk_multiple_parents_mux_test_cases,
 589};
 590
 591static int
 592clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
 593{
 594	struct clk_multiple_parent_ctx *ctx;
 595	const char *parents[2] = { "missing-parent", "proper-parent"};
 596	int ret;
 597
 598	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 599	if (!ctx)
 600		return -ENOMEM;
 601	test->priv = ctx;
 602
 603	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
 604							    &clk_dummy_rate_ops,
 605							    0);
 606	ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
 607	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
 608	if (ret)
 609		return ret;
 610
 611	ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
 612					   &clk_multiple_parents_mux_ops,
 613					   CLK_SET_RATE_PARENT);
 614	ret = clk_hw_register(NULL, &ctx->hw);
 615	if (ret)
 616		return ret;
 617
 618	return 0;
 619}
 620
 621static void
 622clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
 623{
 624	struct clk_multiple_parent_ctx *ctx = test->priv;
 625
 626	clk_hw_unregister(&ctx->hw);
 627	clk_hw_unregister(&ctx->parents_ctx[1].hw);
 628}
 629
 630/*
 631 * Test that, for a mux whose current parent hasn't been registered yet and is
 632 * thus orphan, clk_get_parent() will return NULL.
 633 */
 634static void
 635clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
 636{
 637	struct clk_multiple_parent_ctx *ctx = test->priv;
 638	struct clk_hw *hw = &ctx->hw;
 639	struct clk *clk = clk_hw_get_clk(hw, NULL);
 640
 641	KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
 642
 643	clk_put(clk);
 644}
 645
 646/*
 647 * Test that, for a mux whose current parent hasn't been registered yet,
 648 * calling clk_set_parent() to a valid parent will properly update the
 649 * mux parent and its orphan status.
 650 */
 651static void
 652clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
 653{
 654	struct clk_multiple_parent_ctx *ctx = test->priv;
 655	struct clk_hw *hw = &ctx->hw;
 656	struct clk *clk = clk_hw_get_clk(hw, NULL);
 657	struct clk *parent, *new_parent;
 658	int ret;
 659
 660	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 661	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 662
 663	ret = clk_set_parent(clk, parent);
 664	KUNIT_ASSERT_EQ(test, ret, 0);
 665
 666	new_parent = clk_get_parent(clk);
 667	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_parent);
 668	KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
 669
 670	clk_put(parent);
 671	clk_put(clk);
 672}
 673
 674/*
 675 * Test that, for a mux that started orphan but got switched to a valid
 676 * parent, calling clk_drop_range() on the mux won't affect the parent
 677 * rate.
 678 */
 679static void
 680clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
 681{
 682	struct clk_multiple_parent_ctx *ctx = test->priv;
 683	struct clk_hw *hw = &ctx->hw;
 684	struct clk *clk = clk_hw_get_clk(hw, NULL);
 685	struct clk *parent;
 686	unsigned long parent_rate, new_parent_rate;
 687	int ret;
 688
 689	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 690	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 691
 692	parent_rate = clk_get_rate(parent);
 693	KUNIT_ASSERT_GT(test, parent_rate, 0);
 694
 695	ret = clk_set_parent(clk, parent);
 696	KUNIT_ASSERT_EQ(test, ret, 0);
 697
 698	ret = clk_drop_range(clk);
 699	KUNIT_ASSERT_EQ(test, ret, 0);
 700
 701	new_parent_rate = clk_get_rate(clk);
 702	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
 703	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
 704
 705	clk_put(parent);
 706	clk_put(clk);
 707}
 708
 709/*
 710 * Test that, for a mux that started orphan but got switched to a valid
 711 * parent, the rate of the mux and its new parent are consistent.
 712 */
 713static void
 714clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
 715{
 716	struct clk_multiple_parent_ctx *ctx = test->priv;
 717	struct clk_hw *hw = &ctx->hw;
 718	struct clk *clk = clk_hw_get_clk(hw, NULL);
 719	struct clk *parent;
 720	unsigned long parent_rate, rate;
 721	int ret;
 722
 723	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 724	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 725
 726	parent_rate = clk_get_rate(parent);
 727	KUNIT_ASSERT_GT(test, parent_rate, 0);
 728
 729	ret = clk_set_parent(clk, parent);
 730	KUNIT_ASSERT_EQ(test, ret, 0);
 731
 732	rate = clk_get_rate(clk);
 733	KUNIT_ASSERT_GT(test, rate, 0);
 734	KUNIT_EXPECT_EQ(test, parent_rate, rate);
 735
 736	clk_put(parent);
 737	clk_put(clk);
 738}
 739
 740/*
 741 * Test that, for a mux that started orphan but got switched to a valid
 742 * parent, calling clk_put() on the mux won't affect the parent rate.
 743 */
 744static void
 745clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
 746{
 747	struct clk_multiple_parent_ctx *ctx = test->priv;
 748	struct clk *clk, *parent;
 749	unsigned long parent_rate, new_parent_rate;
 750	int ret;
 751
 752	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 753	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 754
 755	clk = clk_hw_get_clk(&ctx->hw, NULL);
 756	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
 757
 758	parent_rate = clk_get_rate(parent);
 759	KUNIT_ASSERT_GT(test, parent_rate, 0);
 760
 761	ret = clk_set_parent(clk, parent);
 762	KUNIT_ASSERT_EQ(test, ret, 0);
 763
 764	clk_put(clk);
 765
 766	new_parent_rate = clk_get_rate(parent);
 767	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
 768	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
 769
 770	clk_put(parent);
 771}
 772
 773/*
 774 * Test that, for a mux that started orphan but got switched to a valid
 775 * parent, calling clk_set_rate_range() will affect the parent state if
 776 * its rate is out of range.
 777 */
 778static void
 779clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
 780{
 781	struct clk_multiple_parent_ctx *ctx = test->priv;
 782	struct clk_hw *hw = &ctx->hw;
 783	struct clk *clk = clk_hw_get_clk(hw, NULL);
 784	struct clk *parent;
 785	unsigned long rate;
 786	int ret;
 787
 788	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 789	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 790
 791	ret = clk_set_parent(clk, parent);
 792	KUNIT_ASSERT_EQ(test, ret, 0);
 793
 794	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
 795	KUNIT_ASSERT_EQ(test, ret, 0);
 796
 797	rate = clk_get_rate(clk);
 798	KUNIT_ASSERT_GT(test, rate, 0);
 799	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
 800	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
 801
 802	clk_put(parent);
 803	clk_put(clk);
 804}
 805
 806/*
 807 * Test that, for a mux that started orphan but got switched to a valid
 808 * parent, calling clk_set_rate_range() won't affect the parent state if
 809 * its rate is within range.
 810 */
 811static void
 812clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
 813{
 814	struct clk_multiple_parent_ctx *ctx = test->priv;
 815	struct clk_hw *hw = &ctx->hw;
 816	struct clk *clk = clk_hw_get_clk(hw, NULL);
 817	struct clk *parent;
 818	unsigned long parent_rate, new_parent_rate;
 819	int ret;
 820
 821	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 822	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 823
 824	parent_rate = clk_get_rate(parent);
 825	KUNIT_ASSERT_GT(test, parent_rate, 0);
 826
 827	ret = clk_set_parent(clk, parent);
 828	KUNIT_ASSERT_EQ(test, ret, 0);
 829
 830	ret = clk_set_rate_range(clk,
 831				 DUMMY_CLOCK_INIT_RATE - 1000,
 832				 DUMMY_CLOCK_INIT_RATE + 1000);
 833	KUNIT_ASSERT_EQ(test, ret, 0);
 834
 835	new_parent_rate = clk_get_rate(parent);
 836	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
 837	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
 838
 839	clk_put(parent);
 840	clk_put(clk);
 841}
 842
 843/*
 844 * Test that, for a mux whose current parent hasn't been registered yet,
 845 * calling clk_set_rate_range() will succeed, and will be taken into
 846 * account when rounding a rate.
 847 */
 848static void
 849clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
 850{
 851	struct clk_multiple_parent_ctx *ctx = test->priv;
 852	struct clk_hw *hw = &ctx->hw;
 853	struct clk *clk = clk_hw_get_clk(hw, NULL);
 854	unsigned long rate;
 855	int ret;
 856
 857	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
 858	KUNIT_ASSERT_EQ(test, ret, 0);
 859
 860	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
 861	KUNIT_ASSERT_GT(test, rate, 0);
 862	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
 863	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
 864
 865	clk_put(clk);
 866}
 867
 868/*
 869 * Test that, for a mux that started orphan, was assigned a rate and
 870 * then got switched to a valid parent, its rate is eventually within
 871 * range.
 872 *
 873 * FIXME: Even though we update the rate as part of clk_set_parent(), we
 874 * don't evaluate whether that new rate is within range and needs to be
 875 * adjusted.
 876 */
 877static void
 878clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
 879{
 880	struct clk_multiple_parent_ctx *ctx = test->priv;
 881	struct clk_hw *hw = &ctx->hw;
 882	struct clk *clk = clk_hw_get_clk(hw, NULL);
 883	struct clk *parent;
 884	unsigned long rate;
 885	int ret;
 886
 887	kunit_skip(test, "This needs to be fixed in the core.");
 888
 889	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
 890
 891	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 892	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 893
 894	ret = clk_set_parent(clk, parent);
 895	KUNIT_ASSERT_EQ(test, ret, 0);
 896
 897	rate = clk_get_rate(clk);
 898	KUNIT_ASSERT_GT(test, rate, 0);
 899	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
 900	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
 901
 902	clk_put(parent);
 903	clk_put(clk);
 904}
 905
 906static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
 907	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
 908	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
 909	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
 910	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
 911	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
 912	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
 913	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
 914	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
 915	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
 916	{}
 917};
 918
 919/*
 920 * Test suite for a basic mux clock with two parents. The default parent
 921 * isn't registered, only the second parent is. By default, the clock
 922 * will thus be orphan.
 923 *
 924 * These tests exercise the behaviour of the consumer API when dealing
 925 * with an orphan clock, and how we deal with the transition to a valid
 926 * parent.
 927 */
 928static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
 929	.name = "clk-orphan-transparent-multiple-parent-mux-test",
 930	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
 931	.exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
 932	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
 933};
 934
 935struct clk_single_parent_ctx {
 936	struct clk_dummy_context parent_ctx;
 937	struct clk_hw hw;
 938};
 939
 940static int clk_single_parent_mux_test_init(struct kunit *test)
 941{
 942	struct clk_single_parent_ctx *ctx;
 943	int ret;
 944
 945	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 946	if (!ctx)
 947		return -ENOMEM;
 948	test->priv = ctx;
 949
 950	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
 951	ctx->parent_ctx.hw.init =
 952		CLK_HW_INIT_NO_PARENT("parent-clk",
 953				      &clk_dummy_rate_ops,
 954				      0);
 955
 956	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
 957	if (ret)
 958		return ret;
 959
 960	ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
 961				   &clk_dummy_single_parent_ops,
 962				   CLK_SET_RATE_PARENT);
 963
 964	ret = clk_hw_register(NULL, &ctx->hw);
 965	if (ret)
 966		return ret;
 967
 968	return 0;
 969}
 970
 971static void
 972clk_single_parent_mux_test_exit(struct kunit *test)
 973{
 974	struct clk_single_parent_ctx *ctx = test->priv;
 975
 976	clk_hw_unregister(&ctx->hw);
 977	clk_hw_unregister(&ctx->parent_ctx.hw);
 978}
 979
 980/*
 981 * Test that for a clock with a single parent, clk_get_parent() actually
 982 * returns the parent.
 983 */
 984static void
 985clk_test_single_parent_mux_get_parent(struct kunit *test)
 986{
 987	struct clk_single_parent_ctx *ctx = test->priv;
 988	struct clk_hw *hw = &ctx->hw;
 989	struct clk *clk = clk_hw_get_clk(hw, NULL);
 990	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
 991
 992	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
 993
 994	clk_put(parent);
 995	clk_put(clk);
 996}
 997
 998/*
 999 * Test that for a clock with a single parent, clk_has_parent() actually
1000 * reports it as a parent.
1001 */
1002static void
1003clk_test_single_parent_mux_has_parent(struct kunit *test)
1004{
1005	struct clk_single_parent_ctx *ctx = test->priv;
1006	struct clk_hw *hw = &ctx->hw;
1007	struct clk *clk = clk_hw_get_clk(hw, NULL);
1008	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1009
1010	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1011
1012	clk_put(parent);
1013	clk_put(clk);
1014}
1015
1016/*
1017 * Test that for a clock that can't modify its rate and with a single
1018 * parent, if we set disjoint ranges on the parent and then the child,
1019 * the second will return an error.
1020 *
1021 * FIXME: clk_set_rate_range() only considers the current clock when
1022 * evaluating whether ranges are disjoint and not the upstream clocks'
1023 * ranges.
1024 */
1025static void
1026clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1027{
1028	struct clk_single_parent_ctx *ctx = test->priv;
1029	struct clk_hw *hw = &ctx->hw;
1030	struct clk *clk = clk_hw_get_clk(hw, NULL);
1031	struct clk *parent;
1032	int ret;
1033
1034	kunit_skip(test, "This needs to be fixed in the core.");
1035
1036	parent = clk_get_parent(clk);
1037	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1038
1039	ret = clk_set_rate_range(parent, 1000, 2000);
1040	KUNIT_ASSERT_EQ(test, ret, 0);
1041
1042	ret = clk_set_rate_range(clk, 3000, 4000);
1043	KUNIT_EXPECT_LT(test, ret, 0);
1044
1045	clk_put(clk);
1046}
1047
1048/*
1049 * Test that for a clock that can't modify its rate and with a single
1050 * parent, if we set disjoint ranges on the child and then the parent,
1051 * the second will return an error.
1052 *
1053 * FIXME: clk_set_rate_range() only considers the current clock when
1054 * evaluating whether ranges are disjoint and not the downstream clocks'
1055 * ranges.
1056 */
1057static void
1058clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1059{
1060	struct clk_single_parent_ctx *ctx = test->priv;
1061	struct clk_hw *hw = &ctx->hw;
1062	struct clk *clk = clk_hw_get_clk(hw, NULL);
1063	struct clk *parent;
1064	int ret;
1065
1066	kunit_skip(test, "This needs to be fixed in the core.");
1067
1068	parent = clk_get_parent(clk);
1069	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1070
1071	ret = clk_set_rate_range(clk, 1000, 2000);
1072	KUNIT_ASSERT_EQ(test, ret, 0);
1073
1074	ret = clk_set_rate_range(parent, 3000, 4000);
1075	KUNIT_EXPECT_LT(test, ret, 0);
1076
1077	clk_put(clk);
1078}
1079
1080/*
1081 * Test that for a clock that can't modify its rate and with a single
1082 * parent, if we set a range on the parent and then call
1083 * clk_round_rate(), the boundaries of the parent are taken into
1084 * account.
1085 */
1086static void
1087clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1088{
1089	struct clk_single_parent_ctx *ctx = test->priv;
1090	struct clk_hw *hw = &ctx->hw;
1091	struct clk *clk = clk_hw_get_clk(hw, NULL);
1092	struct clk *parent;
1093	unsigned long rate;
1094	int ret;
1095
1096	parent = clk_get_parent(clk);
1097	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1098
1099	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1100	KUNIT_ASSERT_EQ(test, ret, 0);
1101
1102	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1103	KUNIT_ASSERT_GT(test, rate, 0);
1104	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1105	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1106
1107	clk_put(clk);
1108}
1109
1110/*
1111 * Test that for a clock that can't modify its rate and with a single
1112 * parent, if we set a range on the parent and a more restrictive one on
1113 * the child, and then call clk_round_rate(), the boundaries of the
1114 * two clocks are taken into account.
1115 */
1116static void
1117clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1118{
1119	struct clk_single_parent_ctx *ctx = test->priv;
1120	struct clk_hw *hw = &ctx->hw;
1121	struct clk *clk = clk_hw_get_clk(hw, NULL);
1122	struct clk *parent;
1123	unsigned long rate;
1124	int ret;
1125
1126	parent = clk_get_parent(clk);
1127	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1128
1129	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1130	KUNIT_ASSERT_EQ(test, ret, 0);
1131
1132	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1133	KUNIT_ASSERT_EQ(test, ret, 0);
1134
1135	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1136	KUNIT_ASSERT_GT(test, rate, 0);
1137	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1138	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1139
1140	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1141	KUNIT_ASSERT_GT(test, rate, 0);
1142	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1143	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1144
1145	clk_put(clk);
1146}
1147
1148/*
1149 * Test that for a clock that can't modify its rate and with a single
1150 * parent, if we set a range on the child and a more restrictive one on
1151 * the parent, and then call clk_round_rate(), the boundaries of the
1152 * two clocks are taken into account.
1153 */
1154static void
1155clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1156{
1157	struct clk_single_parent_ctx *ctx = test->priv;
1158	struct clk_hw *hw = &ctx->hw;
1159	struct clk *clk = clk_hw_get_clk(hw, NULL);
1160	struct clk *parent;
1161	unsigned long rate;
1162	int ret;
1163
1164	parent = clk_get_parent(clk);
1165	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1166
1167	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1168	KUNIT_ASSERT_EQ(test, ret, 0);
1169
1170	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1171	KUNIT_ASSERT_EQ(test, ret, 0);
1172
1173	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1174	KUNIT_ASSERT_GT(test, rate, 0);
1175	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1176	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1177
1178	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1179	KUNIT_ASSERT_GT(test, rate, 0);
1180	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1181	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1182
1183	clk_put(clk);
1184}
1185
1186static struct kunit_case clk_single_parent_mux_test_cases[] = {
1187	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
1188	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
1189	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
1190	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
1191	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
1192	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
1193	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
1194	{}
1195};
1196
1197/*
1198 * Test suite for a basic mux clock with one parent, with
1199 * CLK_SET_RATE_PARENT on the child.
1200 *
1201 * These tests exercise the consumer API and check that the state of the
1202 * child and parent is sane and consistent.
1203 */
1204static struct kunit_suite
1205clk_single_parent_mux_test_suite = {
1206	.name = "clk-single-parent-mux-test",
1207	.init = clk_single_parent_mux_test_init,
1208	.exit = clk_single_parent_mux_test_exit,
1209	.test_cases = clk_single_parent_mux_test_cases,
1210};
1211
1212static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1213{
1214	struct clk_single_parent_ctx *ctx;
1215	struct clk_init_data init = { };
1216	const char * const parents[] = { "orphan_parent" };
1217	int ret;
1218
1219	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1220	if (!ctx)
1221		return -ENOMEM;
1222	test->priv = ctx;
1223
1224	init.name = "test_orphan_dummy_parent";
1225	init.ops = &clk_dummy_single_parent_ops;
1226	init.parent_names = parents;
1227	init.num_parents = ARRAY_SIZE(parents);
1228	init.flags = CLK_SET_RATE_PARENT;
1229	ctx->hw.init = &init;
1230
1231	ret = clk_hw_register(NULL, &ctx->hw);
1232	if (ret)
1233		return ret;
1234
1235	memset(&init, 0, sizeof(init));
1236	init.name = "orphan_parent";
1237	init.ops = &clk_dummy_rate_ops;
1238	ctx->parent_ctx.hw.init = &init;
1239	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1240
1241	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1242	if (ret)
1243		return ret;
1244
1245	return 0;
1246}
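/*
 * The parent is deliberately registered after its child above, so the
 * child starts out orphan and is only adopted once "orphan_parent"
 * appears. Reusing 'init' after the memset() is fine because
 * clk_hw_register() does not keep a reference to it.
 */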
1247
1248/*
1249 * Test that a mux-only clock, with an initial rate within a range,
1250 * will still have the same rate after the range has been enforced.
1251 *
1252 * See:
1253 * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1254 */
1255static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1256{
1257	struct clk_single_parent_ctx *ctx = test->priv;
1258	struct clk_hw *hw = &ctx->hw;
1259	struct clk *clk = clk_hw_get_clk(hw, NULL);
1260	unsigned long rate, new_rate;
1261
1262	rate = clk_get_rate(clk);
1263	KUNIT_ASSERT_GT(test, rate, 0);
1264
1265	KUNIT_ASSERT_EQ(test,
1266			clk_set_rate_range(clk,
1267					   ctx->parent_ctx.rate - 1000,
1268					   ctx->parent_ctx.rate + 1000),
1269			0);
1270
1271	new_rate = clk_get_rate(clk);
1272	KUNIT_ASSERT_GT(test, new_rate, 0);
1273	KUNIT_EXPECT_EQ(test, rate, new_rate);
1274
1275	clk_put(clk);
1276}
1277
1278static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
1279	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
1280	{}
1281};
1282
1283/*
1284 * Test suite for a basic mux clock with one parent. The parent is
1285 * registered after its child. The clock will thus be an orphan when
1286 * registered, but will no longer be when the tests run.
1287 *
1288 * These tests make sure a clock that used to be orphan has a sane,
1289 * consistent, behaviour.
1290 */
1291static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
1292	.name = "clk-orphan-transparent-single-parent-test",
1293	.init = clk_orphan_transparent_single_parent_mux_test_init,
1294	.exit = clk_single_parent_mux_test_exit,
1295	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
1296};
1297
1298struct clk_single_parent_two_lvl_ctx {
1299	struct clk_dummy_context parent_parent_ctx;
1300	struct clk_dummy_context parent_ctx;
1301	struct clk_hw hw;
1302};
1303
1304static int
1305clk_orphan_two_level_root_last_test_init(struct kunit *test)
1306{
1307	struct clk_single_parent_two_lvl_ctx *ctx;
1308	int ret;
1309
1310	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1311	if (!ctx)
1312		return -ENOMEM;
1313	test->priv = ctx;
1314
1315	ctx->parent_ctx.hw.init =
1316		CLK_HW_INIT("intermediate-parent",
1317			    "root-parent",
1318			    &clk_dummy_single_parent_ops,
1319			    CLK_SET_RATE_PARENT);
1320	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1321	if (ret)
1322		return ret;
1323
1324	ctx->hw.init =
1325		CLK_HW_INIT("test-clk", "intermediate-parent",
1326			    &clk_dummy_single_parent_ops,
1327			    CLK_SET_RATE_PARENT);
1328	ret = clk_hw_register(NULL, &ctx->hw);
1329	if (ret)
1330		return ret;
1331
1332	ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1333	ctx->parent_parent_ctx.hw.init =
1334		CLK_HW_INIT_NO_PARENT("root-parent",
1335				      &clk_dummy_rate_ops,
1336				      0);
1337	ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1338	if (ret)
1339		return ret;
1340
1341	return 0;
1342}
1343
1344static void
1345clk_orphan_two_level_root_last_test_exit(struct kunit *test)
1346{
1347	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1348
1349	clk_hw_unregister(&ctx->hw);
1350	clk_hw_unregister(&ctx->parent_ctx.hw);
1351	clk_hw_unregister(&ctx->parent_parent_ctx.hw);
1352}
1353
1354/*
1355 * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1356 * will return the proper rate.
1357 */
1358static void
1359clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1360{
1361	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1362	struct clk_hw *hw = &ctx->hw;
1363	struct clk *clk = clk_hw_get_clk(hw, NULL);
1364	unsigned long rate;
1365
1366	rate = clk_get_rate(clk);
1367	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1368
1369	clk_put(clk);
1370}
1371
1372/*
1373 * Test that, for a clock whose parent used to be orphan,
1374 * clk_set_rate_range() won't affect its rate if it is already within
1375 * range.
1376 *
1377 * See (for Exynos 4210):
1378 * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1379 */
1380static void
1381clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1382{
1383	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1384	struct clk_hw *hw = &ctx->hw;
1385	struct clk *clk = clk_hw_get_clk(hw, NULL);
1386	unsigned long rate;
1387	int ret;
1388
1389	ret = clk_set_rate_range(clk,
1390				 DUMMY_CLOCK_INIT_RATE - 1000,
1391				 DUMMY_CLOCK_INIT_RATE + 1000);
1392	KUNIT_ASSERT_EQ(test, ret, 0);
1393
1394	rate = clk_get_rate(clk);
1395	KUNIT_ASSERT_GT(test, rate, 0);
1396	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1397
1398	clk_put(clk);
1399}
1400
1401static struct kunit_case
1402clk_orphan_two_level_root_last_test_cases[] = {
1403	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
1404	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
1405	{}
1406};
1407
1408/*
1409 * Test suite for a basic, transparent, clock with a parent that is also
1410 * such a clock. The parent's parent is registered last, while the
1411 * parent and its child are registered in that order. The intermediate
1412 * and leaf clocks will thus be orphan when registered, but the leaf
1413 * clock itself will always have its parent and will never be
1414 * reparented. Indeed, it's only orphan because its parent is.
1415 *
1416 * These tests exercise the behaviour of the consumer API when dealing
1417 * with an orphan clock, and how we deal with the transition to a valid
1418 * parent.
1419 */
1420static struct kunit_suite
1421clk_orphan_two_level_root_last_test_suite = {
1422	.name = "clk-orphan-two-level-root-last-test",
1423	.init = clk_orphan_two_level_root_last_test_init,
1424	.exit = clk_orphan_two_level_root_last_test_exit,
1425	.test_cases = clk_orphan_two_level_root_last_test_cases,
1426};
1427
1428/*
1429 * Test that clk_set_rate_range won't return an error for a valid range
1430 * and that it will make sure the rate of the clock is within the
1431 * boundaries.
1432 */
1433static void clk_range_test_set_range(struct kunit *test)
1434{
1435	struct clk_dummy_context *ctx = test->priv;
1436	struct clk_hw *hw = &ctx->hw;
1437	struct clk *clk = clk_hw_get_clk(hw, NULL);
1438	unsigned long rate;
1439
1440	KUNIT_ASSERT_EQ(test,
1441			clk_set_rate_range(clk,
1442					   DUMMY_CLOCK_RATE_1,
1443					   DUMMY_CLOCK_RATE_2),
1444			0);
1445
1446	rate = clk_get_rate(clk);
1447	KUNIT_ASSERT_GT(test, rate, 0);
1448	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1449	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1450
1451	clk_put(clk);
1452}
1453
1454/*
1455 * Test that calling clk_set_rate_range with a minimum rate higher than
1456 * the maximum rate returns an error.
1457 */
1458static void clk_range_test_set_range_invalid(struct kunit *test)
1459{
1460	struct clk_dummy_context *ctx = test->priv;
1461	struct clk_hw *hw = &ctx->hw;
1462	struct clk *clk = clk_hw_get_clk(hw, NULL);
1463
1464	KUNIT_EXPECT_LT(test,
1465			clk_set_rate_range(clk,
1466					   DUMMY_CLOCK_RATE_1 + 1000,
1467					   DUMMY_CLOCK_RATE_1),
1468			0);
1469
1470	clk_put(clk);
1471}
1472
1473/*
1474 * Test that users can't set multiple disjoint ranges that would be
1475 * impossible to meet.
1476 */
1477static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1478{
1479	struct clk_dummy_context *ctx = test->priv;
1480	struct clk_hw *hw = &ctx->hw;
1481	struct clk *user1, *user2;
1482
1483	user1 = clk_hw_get_clk(hw, NULL);
1484	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1485
1486	user2 = clk_hw_get_clk(hw, NULL);
1487	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1488
1489	KUNIT_ASSERT_EQ(test,
1490			clk_set_rate_range(user1, 1000, 2000),
1491			0);
1492
1493	KUNIT_EXPECT_LT(test,
1494			clk_set_rate_range(user2, 3000, 4000),
1495			0);
1496
1497	clk_put(user2);
1498	clk_put(user1);
1499}
1500
1501/*
1502 * Test that if our clock has some boundaries and we try to round a rate
1503 * lower than the minimum, the returned rate will be within range.
1504 */
1505static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1506{
1507	struct clk_dummy_context *ctx = test->priv;
1508	struct clk_hw *hw = &ctx->hw;
1509	struct clk *clk = clk_hw_get_clk(hw, NULL);
1510	long rate;
1511
1512	KUNIT_ASSERT_EQ(test,
1513			clk_set_rate_range(clk,
1514					   DUMMY_CLOCK_RATE_1,
1515					   DUMMY_CLOCK_RATE_2),
1516			0);
1517
1518	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1519	KUNIT_ASSERT_GT(test, rate, 0);
1520	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1521	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1522
1523	clk_put(clk);
1524}
1525
1526/*
1527 * Test that if our clock has some boundaries and we try to set a rate
1528 * lower than the minimum, the new rate will be within range.
1529 */
1530static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1531{
1532	struct clk_dummy_context *ctx = test->priv;
1533	struct clk_hw *hw = &ctx->hw;
1534	struct clk *clk = clk_hw_get_clk(hw, NULL);
1535	unsigned long rate;
1536
1537	KUNIT_ASSERT_EQ(test,
1538			clk_set_rate_range(clk,
1539					   DUMMY_CLOCK_RATE_1,
1540					   DUMMY_CLOCK_RATE_2),
1541			0);
1542
1543	KUNIT_ASSERT_EQ(test,
1544			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1545			0);
1546
1547	rate = clk_get_rate(clk);
1548	KUNIT_ASSERT_GT(test, rate, 0);
1549	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1550	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1551
1552	clk_put(clk);
1553}
1554
1555/*
1556 * Test that if our clock has some boundaries and we try to round and
1557 * set a rate lower than the minimum, the rate returned by
1558 * clk_round_rate() will be consistent with the new rate set by
1559 * clk_set_rate().
1560 */
1561static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1562{
1563	struct clk_dummy_context *ctx = test->priv;
1564	struct clk_hw *hw = &ctx->hw;
1565	struct clk *clk = clk_hw_get_clk(hw, NULL);
1566	long rounded;
1567
1568	KUNIT_ASSERT_EQ(test,
1569			clk_set_rate_range(clk,
1570					   DUMMY_CLOCK_RATE_1,
1571					   DUMMY_CLOCK_RATE_2),
1572			0);
1573
1574	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1575	KUNIT_ASSERT_GT(test, rounded, 0);
1576
1577	KUNIT_ASSERT_EQ(test,
1578			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1579			0);
1580
1581	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1582
1583	clk_put(clk);
1584}
1585
1586/*
1587 * Test that if our clock has some boundaries and we try to round a rate
1588 * higher than the maximum, the returned rate will be within range.
1589 */
1590static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1591{
1592	struct clk_dummy_context *ctx = test->priv;
1593	struct clk_hw *hw = &ctx->hw;
1594	struct clk *clk = clk_hw_get_clk(hw, NULL);
1595	long rate;
1596
1597	KUNIT_ASSERT_EQ(test,
1598			clk_set_rate_range(clk,
1599					   DUMMY_CLOCK_RATE_1,
1600					   DUMMY_CLOCK_RATE_2),
1601			0);
1602
1603	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1604	KUNIT_ASSERT_GT(test, rate, 0);
1605	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1606	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1607
1608	clk_put(clk);
1609}
1610
1611/*
1612 * Test that if our clock has some boundaries and we try to set a rate
1613 * higher than the maximum, the new rate will be within range.
1614 */
1615static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1616{
1617	struct clk_dummy_context *ctx = test->priv;
1618	struct clk_hw *hw = &ctx->hw;
1619	struct clk *clk = clk_hw_get_clk(hw, NULL);
1620	unsigned long rate;
1621
1622	KUNIT_ASSERT_EQ(test,
1623			clk_set_rate_range(clk,
1624					   DUMMY_CLOCK_RATE_1,
1625					   DUMMY_CLOCK_RATE_2),
1626			0);
1627
1628	KUNIT_ASSERT_EQ(test,
1629			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1630			0);
1631
1632	rate = clk_get_rate(clk);
1633	KUNIT_ASSERT_GT(test, rate, 0);
1634	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1635	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1636
1637	clk_put(clk);
1638}
1639
1640/*
1641 * Test that if our clock has some boundaries and we try to round and
1642 * set a rate higher than the maximum, the rate returned by
1643 * clk_round_rate() will be consistent with the new rate set by
1644 * clk_set_rate().
1645 */
1646static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1647{
1648	struct clk_dummy_context *ctx = test->priv;
1649	struct clk_hw *hw = &ctx->hw;
1650	struct clk *clk = clk_hw_get_clk(hw, NULL);
1651	long rounded;
1652
1653	KUNIT_ASSERT_EQ(test,
1654			clk_set_rate_range(clk,
1655					   DUMMY_CLOCK_RATE_1,
1656					   DUMMY_CLOCK_RATE_2),
1657			0);
1658
1659	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1660	KUNIT_ASSERT_GT(test, rounded, 0);
1661
1662	KUNIT_ASSERT_EQ(test,
1663			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1664			0);
1665
1666	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1667
1668	clk_put(clk);
1669}
1670
1671/*
1672 * Test that if our clock has a rate lower than the minimum set by a
1673 * call to clk_set_rate_range(), the rate will be raised to match the
1674 * new minimum.
1675 *
1676 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1677 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1678 */
1679static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1680{
1681	struct clk_dummy_context *ctx = test->priv;
1682	struct clk_hw *hw = &ctx->hw;
1683	struct clk *clk = clk_hw_get_clk(hw, NULL);
1684	unsigned long rate;
1685
1686	KUNIT_ASSERT_EQ(test,
1687			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1688			0);
1689
1690	KUNIT_ASSERT_EQ(test,
1691			clk_set_rate_range(clk,
1692					   DUMMY_CLOCK_RATE_1,
1693					   DUMMY_CLOCK_RATE_2),
1694			0);
1695
1696	rate = clk_get_rate(clk);
1697	KUNIT_ASSERT_GT(test, rate, 0);
1698	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1699
1700	clk_put(clk);
1701}
1702
1703/*
1704 * Test that if our clock has a rate higher than the maximum set by a
1705 * call to clk_set_rate_range(), the rate will be lowered to match the
1706 * new maximum.
1707 *
1708 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1709 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1710 */
1711static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1712{
1713	struct clk_dummy_context *ctx = test->priv;
1714	struct clk_hw *hw = &ctx->hw;
1715	struct clk *clk = clk_hw_get_clk(hw, NULL);
1716	unsigned long rate;
1717
1718	KUNIT_ASSERT_EQ(test,
1719			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1720			0);
1721
1722	KUNIT_ASSERT_EQ(test,
1723			clk_set_rate_range(clk,
1724					   DUMMY_CLOCK_RATE_1,
1725					   DUMMY_CLOCK_RATE_2),
1726			0);
1727
1728	rate = clk_get_rate(clk);
1729	KUNIT_ASSERT_GT(test, rate, 0);
1730	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1731
1732	clk_put(clk);
1733}
1734
1735static struct kunit_case clk_range_test_cases[] = {
1736	KUNIT_CASE(clk_range_test_set_range),
1737	KUNIT_CASE(clk_range_test_set_range_invalid),
1738	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
1739	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
1740	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
1741	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
1742	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
1743	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
1744	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
1745	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
1746	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
1747	{}
1748};
1749
1750/*
1751 * Test suite for a basic rate clock, without any parent.
1752 *
1753 * These tests exercise the rate range API: clk_set_rate_range(),
1754 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1755 */
1756static struct kunit_suite clk_range_test_suite = {
1757	.name = "clk-range-test",
1758	.init = clk_test_init,
1759	.exit = clk_test_exit,
1760	.test_cases = clk_range_test_cases,
1761};
1762
1763/*
1764 * Test that if we have several subsequent calls to
1765 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1766 * needed each and every time.
1767 *
1768 * With clk_dummy_maximize_rate_ops, this means that the rate will
1769 * trail along the maximum as it evolves.
1770 */
1771static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1772{
1773	struct clk_dummy_context *ctx = test->priv;
1774	struct clk_hw *hw = &ctx->hw;
1775	struct clk *clk = clk_hw_get_clk(hw, NULL);
1776	unsigned long rate;
1777
1778	KUNIT_ASSERT_EQ(test,
1779			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1780			0);
1781
1782	KUNIT_ASSERT_EQ(test,
1783			clk_set_rate_range(clk,
1784					   DUMMY_CLOCK_RATE_1,
1785					   DUMMY_CLOCK_RATE_2),
1786			0);
1787
1788	rate = clk_get_rate(clk);
1789	KUNIT_ASSERT_GT(test, rate, 0);
1790	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1791
1792	KUNIT_ASSERT_EQ(test,
1793			clk_set_rate_range(clk,
1794					   DUMMY_CLOCK_RATE_1,
1795					   DUMMY_CLOCK_RATE_2 - 1000),
1796			0);
1797
1798	rate = clk_get_rate(clk);
1799	KUNIT_ASSERT_GT(test, rate, 0);
1800	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1801
1802	KUNIT_ASSERT_EQ(test,
1803			clk_set_rate_range(clk,
1804					   DUMMY_CLOCK_RATE_1,
1805					   DUMMY_CLOCK_RATE_2),
1806			0);
1807
1808	rate = clk_get_rate(clk);
1809	KUNIT_ASSERT_GT(test, rate, 0);
1810	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1811
1812	clk_put(clk);
1813}
1814
1815/*
1816 * Test that if we have several subsequent calls to
1817 * clk_set_rate_range(), across multiple users, the core will reevaluate
1818 * whether a new rate is needed each and every time.
1819 *
1820 * With clk_dummy_maximize_rate_ops, this means that the rate will
1821 * trail along the maximum as it evolves.
1822 */
1823static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1824{
1825	struct clk_dummy_context *ctx = test->priv;
1826	struct clk_hw *hw = &ctx->hw;
1827	struct clk *clk = clk_hw_get_clk(hw, NULL);
1828	struct clk *user1, *user2;
1829	unsigned long rate;
1830
1831	user1 = clk_hw_get_clk(hw, NULL);
1832	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1833
1834	user2 = clk_hw_get_clk(hw, NULL);
1835	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1836
1837	KUNIT_ASSERT_EQ(test,
1838			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1839			0);
1840
1841	KUNIT_ASSERT_EQ(test,
1842			clk_set_rate_range(user1,
1843					   0,
1844					   DUMMY_CLOCK_RATE_2),
1845			0);
1846
1847	rate = clk_get_rate(clk);
1848	KUNIT_ASSERT_GT(test, rate, 0);
1849	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1850
1851	KUNIT_ASSERT_EQ(test,
1852			clk_set_rate_range(user2,
1853					   0,
1854					   DUMMY_CLOCK_RATE_1),
1855			0);
1856
1857	rate = clk_get_rate(clk);
1858	KUNIT_ASSERT_GT(test, rate, 0);
1859	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1860
1861	KUNIT_ASSERT_EQ(test,
1862			clk_drop_range(user2),
1863			0);
1864
1865	rate = clk_get_rate(clk);
1866	KUNIT_ASSERT_GT(test, rate, 0);
1867	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1868
1869	clk_put(user2);
1870	clk_put(user1);
1871	clk_put(clk);
1872}
1873
1874/*
1875 * Test that if we have several subsequent calls to
1876 * clk_set_rate_range(), across multiple users, the core will reevaluate
1877 * whether a new rate is needed, including when a user drops its clock.
1878 *
1879 * With clk_dummy_maximize_rate_ops, this means that the rate will
1880 * trail along the maximum as it evolves.
1881 */
1882static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1883{
1884	struct clk_dummy_context *ctx = test->priv;
1885	struct clk_hw *hw = &ctx->hw;
1886	struct clk *clk = clk_hw_get_clk(hw, NULL);
1887	struct clk *user1, *user2;
1888	unsigned long rate;
1889
1890	user1 = clk_hw_get_clk(hw, NULL);
1891	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1892
1893	user2 = clk_hw_get_clk(hw, NULL);
1894	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1895
1896	KUNIT_ASSERT_EQ(test,
1897			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1898			0);
1899
1900	KUNIT_ASSERT_EQ(test,
1901			clk_set_rate_range(user1,
1902					   0,
1903					   DUMMY_CLOCK_RATE_2),
1904			0);
1905
1906	rate = clk_get_rate(clk);
1907	KUNIT_ASSERT_GT(test, rate, 0);
1908	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1909
1910	KUNIT_ASSERT_EQ(test,
1911			clk_set_rate_range(user2,
1912					   0,
1913					   DUMMY_CLOCK_RATE_1),
1914			0);
1915
1916	rate = clk_get_rate(clk);
1917	KUNIT_ASSERT_GT(test, rate, 0);
1918	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1919
1920	clk_put(user2);
1921
1922	rate = clk_get_rate(clk);
1923	KUNIT_ASSERT_GT(test, rate, 0);
1924	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1925
1926	clk_put(user1);
1927	clk_put(clk);
1928}
1929
1930static struct kunit_case clk_range_maximize_test_cases[] = {
1931	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
1932	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
1933	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
1934	{}
1935};
1936
1937/*
1938 * Test suite for a basic rate clock, without any parent.
1939 *
1940 * These tests exercise the rate range API: clk_set_rate_range(),
1941 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1942 * driver that will always try to run at the highest possible rate.
1943 */
1944static struct kunit_suite clk_range_maximize_test_suite = {
1945	.name = "clk-range-maximize-test",
1946	.init = clk_maximize_test_init,
1947	.exit = clk_test_exit,
1948	.test_cases = clk_range_maximize_test_cases,
1949};
1950
1951/*
1952 * Test that if we have several subsequent calls to
1953 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1954 * needed each and every time.
1955 *
1956 * With clk_dummy_minimize_rate_ops, this means that the rate will
1957 * trail along the minimum as it evolves.
1958 */
1959static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1960{
1961	struct clk_dummy_context *ctx = test->priv;
1962	struct clk_hw *hw = &ctx->hw;
1963	struct clk *clk = clk_hw_get_clk(hw, NULL);
1964	unsigned long rate;
1965
1966	KUNIT_ASSERT_EQ(test,
1967			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1968			0);
1969
1970	KUNIT_ASSERT_EQ(test,
1971			clk_set_rate_range(clk,
1972					   DUMMY_CLOCK_RATE_1,
1973					   DUMMY_CLOCK_RATE_2),
1974			0);
1975
1976	rate = clk_get_rate(clk);
1977	KUNIT_ASSERT_GT(test, rate, 0);
1978	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1979
1980	KUNIT_ASSERT_EQ(test,
1981			clk_set_rate_range(clk,
1982					   DUMMY_CLOCK_RATE_1 + 1000,
1983					   DUMMY_CLOCK_RATE_2),
1984			0);
1985
1986	rate = clk_get_rate(clk);
1987	KUNIT_ASSERT_GT(test, rate, 0);
1988	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1989
1990	KUNIT_ASSERT_EQ(test,
1991			clk_set_rate_range(clk,
1992					   DUMMY_CLOCK_RATE_1,
1993					   DUMMY_CLOCK_RATE_2),
1994			0);
1995
1996	rate = clk_get_rate(clk);
1997	KUNIT_ASSERT_GT(test, rate, 0);
1998	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1999
2000	clk_put(clk);
2001}
2002
2003/*
2004 * Test that if we have several subsequent calls to
2005 * clk_set_rate_range(), across multiple users, the core will reevaluate
2006 * whether a new rate is needed each and every time.
2007 *
2008 * With clk_dummy_minimize_rate_ops, this means that the rate will
2009 * trail along the minimum as it evolves.
2010 */
2011static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2012{
2013	struct clk_dummy_context *ctx = test->priv;
2014	struct clk_hw *hw = &ctx->hw;
2015	struct clk *clk = clk_hw_get_clk(hw, NULL);
2016	struct clk *user1, *user2;
2017	unsigned long rate;
2018
2019	user1 = clk_hw_get_clk(hw, NULL);
2020	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2021
2022	user2 = clk_hw_get_clk(hw, NULL);
2023	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2024
2025	KUNIT_ASSERT_EQ(test,
2026			clk_set_rate_range(user1,
2027					   DUMMY_CLOCK_RATE_1,
2028					   ULONG_MAX),
2029			0);
2030
2031	rate = clk_get_rate(clk);
2032	KUNIT_ASSERT_GT(test, rate, 0);
2033	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2034
2035	KUNIT_ASSERT_EQ(test,
2036			clk_set_rate_range(user2,
2037					   DUMMY_CLOCK_RATE_2,
2038					   ULONG_MAX),
2039			0);
2040
2041	rate = clk_get_rate(clk);
2042	KUNIT_ASSERT_GT(test, rate, 0);
2043	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2044
2045	KUNIT_ASSERT_EQ(test,
2046			clk_drop_range(user2),
2047			0);
2048
2049	rate = clk_get_rate(clk);
2050	KUNIT_ASSERT_GT(test, rate, 0);
2051	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2052
2053	clk_put(user2);
2054	clk_put(user1);
2055	clk_put(clk);
2056}
2057
2058/*
2059 * Test that if we have several subsequent calls to
2060 * clk_set_rate_range(), across multiple users, the core will reevaluate
2061 * whether a new rate is needed, including when a user drops its clock.
2062 *
2063 * With clk_dummy_minimize_rate_ops, this means that the rate will
2064 * trail along the minimum as it evolves.
2065 */
2066static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
2067{
2068	struct clk_dummy_context *ctx = test->priv;
2069	struct clk_hw *hw = &ctx->hw;
2070	struct clk *clk = clk_hw_get_clk(hw, NULL);
2071	struct clk *user1, *user2;
2072	unsigned long rate;
2073
2074	user1 = clk_hw_get_clk(hw, NULL);
2075	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2076
2077	user2 = clk_hw_get_clk(hw, NULL);
2078	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2079
2080	KUNIT_ASSERT_EQ(test,
2081			clk_set_rate_range(user1,
2082					   DUMMY_CLOCK_RATE_1,
2083					   ULONG_MAX),
2084			0);
2085
2086	rate = clk_get_rate(clk);
2087	KUNIT_ASSERT_GT(test, rate, 0);
2088	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2089
2090	KUNIT_ASSERT_EQ(test,
2091			clk_set_rate_range(user2,
2092					   DUMMY_CLOCK_RATE_2,
2093					   ULONG_MAX),
2094			0);
2095
2096	rate = clk_get_rate(clk);
2097	KUNIT_ASSERT_GT(test, rate, 0);
2098	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2099
2100	clk_put(user2);
2101
2102	rate = clk_get_rate(clk);
2103	KUNIT_ASSERT_GT(test, rate, 0);
2104	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2105
2106	clk_put(user1);
2107	clk_put(clk);
2108}
2109
2110static struct kunit_case clk_range_minimize_test_cases[] = {
2111	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
2112	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
2113	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
2114	{}
2115};
2116
2117/*
2118 * Test suite for a basic rate clock, without any parent.
2119 *
2120 * These tests exercise the rate range API: clk_set_rate_range(),
2121 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
2122 * driver that will always try to run at the lowest possible rate.
2123 */
2124static struct kunit_suite clk_range_minimize_test_suite = {
2125	.name = "clk-range-minimize-test",
2126	.init = clk_minimize_test_init,
2127	.exit = clk_test_exit,
2128	.test_cases = clk_range_minimize_test_cases,
2129};
2130
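/*
 * Context for the clk_leaf_mux_set_rate_parent suite below: two root
 * parents ("parent-0" at DUMMY_CLOCK_RATE_1 and "parent-1" at
 * DUMMY_CLOCK_RATE_2) feed a mux ("test-mux"), and a leaf clock
 * ("test-clock") sits on top of that mux with CLK_SET_RATE_PARENT set, so
 * any rate request on the leaf is forwarded upwards.
 */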
2131struct clk_leaf_mux_ctx {
2132	struct clk_multiple_parent_ctx mux_ctx;
2133	struct clk_hw hw;
2134};
2135
2136static int
2137clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
2138{
2139	struct clk_leaf_mux_ctx *ctx;
2140	const char *top_parents[2] = { "parent-0", "parent-1" };
2141	int ret;
2142
2143	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2144	if (!ctx)
2145		return -ENOMEM;
2146	test->priv = ctx;
2147
2148	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2149								    &clk_dummy_rate_ops,
2150								    0);
2151	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2152	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2153	if (ret)
2154		return ret;
2155
2156	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2157								    &clk_dummy_rate_ops,
2158								    0);
2159	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2160	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2161	if (ret)
2162		return ret;
2163
2164	ctx->mux_ctx.current_parent = 0;
2165	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2166						   &clk_multiple_parents_mux_ops,
2167						   0);
2168	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2169	if (ret)
2170		return ret;
2171
2172	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw,
2173				      &clk_dummy_single_parent_ops,
2174				      CLK_SET_RATE_PARENT);
2175	ret = clk_hw_register(NULL, &ctx->hw);
2176	if (ret)
2177		return ret;
2178
2179	return 0;
2180}
2181
2182static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
2183{
2184	struct clk_leaf_mux_ctx *ctx = test->priv;
2185
2186	clk_hw_unregister(&ctx->hw);
2187	clk_hw_unregister(&ctx->mux_ctx.hw);
2188	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2189	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2190}
2191
2192/*
2193 * Test that, for a clock that will forward any rate request to its
2194 * parent, the rate request filled in by __clk_determine_rate() carries
2195 * the requested rate and points at the mux as the best parent.
2196 */
2197static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test)
2198{
2199	struct clk_leaf_mux_ctx *ctx = test->priv;
2200	struct clk_hw *hw = &ctx->hw;
2201	struct clk *clk = clk_hw_get_clk(hw, NULL);
2202	struct clk_rate_request req;
2203	unsigned long rate;
2204	int ret;
2205
2206	rate = clk_get_rate(clk);
2207	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2208
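	/*
	 * Provider-side path: clk_hw_init_rate_request() seeds the request
	 * with the requested rate and the clock's current boundaries, and
	 * __clk_determine_rate() then resolves it. Since the leaf has no
	 * rate op of its own and carries CLK_SET_RATE_PARENT, the core
	 * forwards the request to the mux, which picks the closest
	 * matching parent.
	 */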
2209	clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2);
2210
2211	ret = __clk_determine_rate(hw, &req);
2212	KUNIT_ASSERT_EQ(test, ret, 0);
2213
2214	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
2215	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
2216	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
2217
2218	clk_put(clk);
2219}
2220
2221static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
2222	KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate),
2223	{}
2224};
2225
2226/*
2227 * Test suite for a clock whose parent is a mux with multiple parents.
2228 * The leaf clock has CLK_SET_RATE_PARENT, and will forward rate
2229 * requests to the mux, which will then select which parent is the best
2230 * fit for a given rate.
2231 *
2232 * These tests exercise the behaviour of muxes, and the proper selection
2233 * of parents.
2234 */
2235static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
2236	.name = "clk-leaf-mux-set-rate-parent",
2237	.init = clk_leaf_mux_set_rate_parent_test_init,
2238	.exit = clk_leaf_mux_set_rate_parent_test_exit,
2239	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
2240};
2241
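/*
 * Bookkeeping for one notification type (pre- or post-rate-change): the
 * callback records the old and new rates it was given, flags the event as
 * done and wakes up the test sleeping on the waitqueue.
 */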
2242struct clk_mux_notifier_rate_change {
2243	bool done;
2244	unsigned long old_rate;
2245	unsigned long new_rate;
2246	wait_queue_head_t wq;
2247};
2248
2249struct clk_mux_notifier_ctx {
2250	struct clk_multiple_parent_ctx mux_ctx;
2251	struct clk *clk;
2252	struct notifier_block clk_nb;
2253	struct clk_mux_notifier_rate_change pre_rate_change;
2254	struct clk_mux_notifier_rate_change post_rate_change;
2255};
2256
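/* Longest time, in milliseconds, the tests will wait for a notification. */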
2257#define NOTIFIER_TIMEOUT_MS 100
2258
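/*
 * Notifier callback registered on the mux: for both PRE_RATE_CHANGE and
 * POST_RATE_CHANGE it copies the rates out of the clk_notifier_data,
 * marks the corresponding event as done and wakes up the waiting test.
 */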
2259static int clk_mux_notifier_callback(struct notifier_block *nb,
2260				     unsigned long action, void *data)
2261{
2262	struct clk_notifier_data *clk_data = data;
2263	struct clk_mux_notifier_ctx *ctx = container_of(nb,
2264							struct clk_mux_notifier_ctx,
2265							clk_nb);
2266
2267	if (action & PRE_RATE_CHANGE) {
2268		ctx->pre_rate_change.old_rate = clk_data->old_rate;
2269		ctx->pre_rate_change.new_rate = clk_data->new_rate;
2270		ctx->pre_rate_change.done = true;
2271		wake_up_interruptible(&ctx->pre_rate_change.wq);
2272	}
2273
2274	if (action & POST_RATE_CHANGE) {
2275		ctx->post_rate_change.old_rate = clk_data->old_rate;
2276		ctx->post_rate_change.new_rate = clk_data->new_rate;
2277		ctx->post_rate_change.done = true;
2278		wake_up_interruptible(&ctx->post_rate_change.wq);
2279	}
2280
2281	return 0;
2282}
2283
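/*
 * Registers two parents ("parent-0" at DUMMY_CLOCK_RATE_1, "parent-1" at
 * DUMMY_CLOCK_RATE_2) behind a "test-mux" clock, then subscribes the test
 * notifier to that mux through clk_notifier_register().
 */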
2284static int clk_mux_notifier_test_init(struct kunit *test)
2285{
2286	struct clk_mux_notifier_ctx *ctx;
2287	const char *top_parents[2] = { "parent-0", "parent-1" };
2288	int ret;
2289
2290	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2291	if (!ctx)
2292		return -ENOMEM;
2293	test->priv = ctx;
2294	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2295	init_waitqueue_head(&ctx->pre_rate_change.wq);
2296	init_waitqueue_head(&ctx->post_rate_change.wq);
2297
2298	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2299								    &clk_dummy_rate_ops,
2300								    0);
2301	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2302	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2303	if (ret)
2304		return ret;
2305
2306	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2307								    &clk_dummy_rate_ops,
2308								    0);
2309	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2310	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2311	if (ret)
2312		return ret;
2313
2314	ctx->mux_ctx.current_parent = 0;
2315	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2316						   &clk_multiple_parents_mux_ops,
2317						   0);
2318	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2319	if (ret)
2320		return ret;
2321
2322	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2323	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2324	if (ret)
2325		return ret;
2326
2327	return 0;
2328}
2329
2330static void clk_mux_notifier_test_exit(struct kunit *test)
2331{
2332	struct clk_mux_notifier_ctx *ctx = test->priv;
2333	struct clk *clk = ctx->clk;
2334
2335	clk_notifier_unregister(clk, &ctx->clk_nb);
2336	clk_put(clk);
2337
2338	clk_hw_unregister(&ctx->mux_ctx.hw);
2339	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2340	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2341}
2342
2343/*
2344 * Test that if we have a notifier registered on a mux, the core
2345 * will notify us when we switch to another parent, and with the proper
2346 * old and new rates.
2347 */
2348static void clk_mux_notifier_set_parent_test(struct kunit *test)
2349{
2350	struct clk_mux_notifier_ctx *ctx = test->priv;
2351	struct clk_hw *hw = &ctx->mux_ctx.hw;
2352	struct clk *clk = clk_hw_get_clk(hw, NULL);
2353	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
2354	int ret;
2355
2356	ret = clk_set_parent(clk, new_parent);
2357	KUNIT_ASSERT_EQ(test, ret, 0);
2358
2359	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
2360					       ctx->pre_rate_change.done,
2361					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2362	KUNIT_ASSERT_GT(test, ret, 0);
2363
2364	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2365	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2366
2367	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
2368					       ctx->post_rate_change.done,
2369					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2370	KUNIT_ASSERT_GT(test, ret, 0);
2371
2372	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2373	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2374
2375	clk_put(new_parent);
2376	clk_put(clk);
2377}
2378
2379static struct kunit_case clk_mux_notifier_test_cases[] = {
2380	KUNIT_CASE(clk_mux_notifier_set_parent_test),
2381	{}
2382};
2383
2384/*
2385 * Test suite for a mux with multiple parents, and a notifier registered
2386 * on the mux.
2387 *
2388 * These tests exercise the behaviour of notifiers.
2389 */
2390static struct kunit_suite clk_mux_notifier_test_suite = {
2391	.name = "clk-mux-notifier",
2392	.init = clk_mux_notifier_test_init,
2393	.exit = clk_mux_notifier_test_exit,
2394	.test_cases = clk_mux_notifier_test_cases,
2395};
2396
2397kunit_test_suites(
2398	&clk_leaf_mux_set_rate_parent_test_suite,
2399	&clk_test_suite,
2400	&clk_multiple_parents_mux_test_suite,
2401	&clk_mux_notifier_test_suite,
2402	&clk_orphan_transparent_multiple_parent_mux_test_suite,
2403	&clk_orphan_transparent_single_parent_test_suite,
2404	&clk_orphan_two_level_root_last_test_suite,
2405	&clk_range_test_suite,
2406	&clk_range_maximize_test_suite,
2407	&clk_range_minimize_test_suite,
2408	&clk_single_parent_mux_test_suite,
2409	&clk_uncached_test_suite
2410);
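/*
 * Note: these suites are typically built through CONFIG_CLK_KUNIT_TEST and
 * run with the standard KUnit tooling (for instance the
 * tools/testing/kunit/kunit.py wrapper); the exact invocation depends on
 * the local setup.
 */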
2411MODULE_LICENSE("GPL v2");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Kunit tests for clk framework
   4 */
   5#include <linux/clk.h>
   6#include <linux/clk-provider.h>
   7#include <linux/clk/clk-conf.h>
   8#include <linux/of.h>
   9#include <linux/platform_device.h>
  10
  11/* Needed for clk_hw_get_clk() */
  12#include "clk.h"
  13
  14#include <kunit/clk.h>
  15#include <kunit/of.h>
  16#include <kunit/platform_device.h>
  17#include <kunit/test.h>
  18
  19#include "kunit_clk_assigned_rates.h"
  20#include "clk_parent_data_test.h"
  21
  22static const struct clk_ops empty_clk_ops = { };
  23
  24#define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
  25#define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
  26#define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)
  27
  28struct clk_dummy_context {
  29	struct clk_hw hw;
  30	unsigned long rate;
  31};
  32
  33static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
  34					   unsigned long parent_rate)
  35{
  36	struct clk_dummy_context *ctx =
  37		container_of(hw, struct clk_dummy_context, hw);
  38
  39	return ctx->rate;
  40}
  41
  42static int clk_dummy_determine_rate(struct clk_hw *hw,
  43				    struct clk_rate_request *req)
  44{
  45	/* Just return the same rate without modifying it */
  46	return 0;
  47}
  48
  49static int clk_dummy_maximize_rate(struct clk_hw *hw,
  50				   struct clk_rate_request *req)
  51{
  52	/*
  53	 * If there's a maximum set, always run the clock at the maximum
  54	 * allowed.
  55	 */
  56	if (req->max_rate < ULONG_MAX)
  57		req->rate = req->max_rate;
  58
  59	return 0;
  60}
  61
  62static int clk_dummy_minimize_rate(struct clk_hw *hw,
  63				   struct clk_rate_request *req)
  64{
  65	/*
  66	 * If there's a minimum set, always run the clock at the minimum
  67	 * allowed.
  68	 */
  69	if (req->min_rate > 0)
  70		req->rate = req->min_rate;
  71
  72	return 0;
  73}
  74
  75static int clk_dummy_set_rate(struct clk_hw *hw,
  76			      unsigned long rate,
  77			      unsigned long parent_rate)
  78{
  79	struct clk_dummy_context *ctx =
  80		container_of(hw, struct clk_dummy_context, hw);
  81
  82	ctx->rate = rate;
  83	return 0;
  84}
  85
  86static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
  87{
  88	if (index >= clk_hw_get_num_parents(hw))
  89		return -EINVAL;
  90
  91	return 0;
  92}
  93
  94static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
  95{
  96	return 0;
  97}
  98
  99static const struct clk_ops clk_dummy_rate_ops = {
 100	.recalc_rate = clk_dummy_recalc_rate,
 101	.determine_rate = clk_dummy_determine_rate,
 102	.set_rate = clk_dummy_set_rate,
 103};
 104
 105static const struct clk_ops clk_dummy_maximize_rate_ops = {
 106	.recalc_rate = clk_dummy_recalc_rate,
 107	.determine_rate = clk_dummy_maximize_rate,
 108	.set_rate = clk_dummy_set_rate,
 109};
 110
 111static const struct clk_ops clk_dummy_minimize_rate_ops = {
 112	.recalc_rate = clk_dummy_recalc_rate,
 113	.determine_rate = clk_dummy_minimize_rate,
 114	.set_rate = clk_dummy_set_rate,
 115};
 116
 117static const struct clk_ops clk_dummy_single_parent_ops = {
 118	/*
 119	 * FIXME: Even though we should probably be able to use
 120	 * __clk_mux_determine_rate() here, if we use it and call
 121	 * clk_round_rate() or clk_set_rate() with a rate lower than
 122	 * what all the parents can provide, it will return -EINVAL.
 123	 *
 124	 * This is due to the fact that it has the undocumented
 125	 * behaviour to always pick up the closest rate higher than the
 126	 * requested rate. If we get something lower, it thus considers
 127	 * that it's not acceptable and will return an error.
 128	 *
 129	 * It's somewhat inconsistent and creates a weird threshold
 130	 * between rates above the parent rate which would be rounded to
 131	 * what the parent can provide, but rates below will simply
 132	 * return an error.
 133	 */
 134	.determine_rate = __clk_mux_determine_rate_closest,
 135	.set_parent = clk_dummy_single_set_parent,
 136	.get_parent = clk_dummy_single_get_parent,
 137};
 138
 139struct clk_multiple_parent_ctx {
 140	struct clk_dummy_context parents_ctx[2];
 141	struct clk_hw hw;
 142	u8 current_parent;
 143};
 144
 145static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
 146{
 147	struct clk_multiple_parent_ctx *ctx =
 148		container_of(hw, struct clk_multiple_parent_ctx, hw);
 149
 150	if (index >= clk_hw_get_num_parents(hw))
 151		return -EINVAL;
 152
 153	ctx->current_parent = index;
 154
 155	return 0;
 156}
 157
 158static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
 159{
 160	struct clk_multiple_parent_ctx *ctx =
 161		container_of(hw, struct clk_multiple_parent_ctx, hw);
 162
 163	return ctx->current_parent;
 164}
 165
 166static const struct clk_ops clk_multiple_parents_mux_ops = {
 167	.get_parent = clk_multiple_parents_mux_get_parent,
 168	.set_parent = clk_multiple_parents_mux_set_parent,
 169	.determine_rate = __clk_mux_determine_rate_closest,
 170};
 171
 172static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
 173	.determine_rate = clk_hw_determine_rate_no_reparent,
 174	.get_parent = clk_multiple_parents_mux_get_parent,
 175	.set_parent = clk_multiple_parents_mux_set_parent,
 176};
 177
 178static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
 179{
 180	struct clk_dummy_context *ctx;
 181	struct clk_init_data init = { };
 182	int ret;
 183
 184	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 185	if (!ctx)
 186		return -ENOMEM;
 187	ctx->rate = DUMMY_CLOCK_INIT_RATE;
 188	test->priv = ctx;
 189
 190	init.name = "test_dummy_rate";
 191	init.ops = ops;
 192	ctx->hw.init = &init;
 193
 194	ret = clk_hw_register(NULL, &ctx->hw);
 195	if (ret)
 196		return ret;
 197
 198	return 0;
 199}
 200
 201static int clk_test_init(struct kunit *test)
 202{
 203	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
 204}
 205
 206static int clk_maximize_test_init(struct kunit *test)
 207{
 208	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
 209}
 210
 211static int clk_minimize_test_init(struct kunit *test)
 212{
 213	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
 214}
 215
 216static void clk_test_exit(struct kunit *test)
 217{
 218	struct clk_dummy_context *ctx = test->priv;
 219
 220	clk_hw_unregister(&ctx->hw);
 221}
 222
 223/*
 224 * Test that the actual rate matches what is returned by clk_get_rate()
 225 */
 226static void clk_test_get_rate(struct kunit *test)
 227{
 228	struct clk_dummy_context *ctx = test->priv;
 229	struct clk_hw *hw = &ctx->hw;
 230	struct clk *clk = clk_hw_get_clk(hw, NULL);
 231	unsigned long rate;
 232
 233	rate = clk_get_rate(clk);
 234	KUNIT_ASSERT_GT(test, rate, 0);
 235	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
 236
 237	clk_put(clk);
 238}
 239
 240/*
 241 * Test that, after a call to clk_set_rate(), the rate returned by
 242 * clk_get_rate() matches.
 243 *
 244 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 245 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 246 */
 247static void clk_test_set_get_rate(struct kunit *test)
 248{
 249	struct clk_dummy_context *ctx = test->priv;
 250	struct clk_hw *hw = &ctx->hw;
 251	struct clk *clk = clk_hw_get_clk(hw, NULL);
 252	unsigned long rate;
 253
 254	KUNIT_ASSERT_EQ(test,
 255			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
 256			0);
 257
 258	rate = clk_get_rate(clk);
 259	KUNIT_ASSERT_GT(test, rate, 0);
 260	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
 261
 262	clk_put(clk);
 263}
 264
 265/*
 266 * Test that, after several calls to clk_set_rate(), the rate returned
 267 * by clk_get_rate() matches the last one.
 268 *
 269 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 270 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 271 */
 272static void clk_test_set_set_get_rate(struct kunit *test)
 273{
 274	struct clk_dummy_context *ctx = test->priv;
 275	struct clk_hw *hw = &ctx->hw;
 276	struct clk *clk = clk_hw_get_clk(hw, NULL);
 277	unsigned long rate;
 278
 279	KUNIT_ASSERT_EQ(test,
 280			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
 281			0);
 282
 283	KUNIT_ASSERT_EQ(test,
 284			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
 285			0);
 286
 287	rate = clk_get_rate(clk);
 288	KUNIT_ASSERT_GT(test, rate, 0);
 289	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
 290
 291	clk_put(clk);
 292}
 293
 294/*
 295 * Test that clk_round_rate and clk_set_rate are consitent and will
 296 * return the same frequency.
 297 */
 298static void clk_test_round_set_get_rate(struct kunit *test)
 299{
 300	struct clk_dummy_context *ctx = test->priv;
 301	struct clk_hw *hw = &ctx->hw;
 302	struct clk *clk = clk_hw_get_clk(hw, NULL);
 303	unsigned long set_rate;
 304	long rounded_rate;
 305
 306	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
 307	KUNIT_ASSERT_GT(test, rounded_rate, 0);
 308	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
 309
 310	KUNIT_ASSERT_EQ(test,
 311			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
 312			0);
 313
 314	set_rate = clk_get_rate(clk);
 315	KUNIT_ASSERT_GT(test, set_rate, 0);
 316	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
 317
 318	clk_put(clk);
 319}
 320
 321static struct kunit_case clk_test_cases[] = {
 322	KUNIT_CASE(clk_test_get_rate),
 323	KUNIT_CASE(clk_test_set_get_rate),
 324	KUNIT_CASE(clk_test_set_set_get_rate),
 325	KUNIT_CASE(clk_test_round_set_get_rate),
 326	{}
 327};
 328
 329/*
 330 * Test suite for a basic rate clock, without any parent.
 331 *
 332 * These tests exercise the rate API with simple scenarios
 333 */
 334static struct kunit_suite clk_test_suite = {
 335	.name = "clk-test",
 336	.init = clk_test_init,
 337	.exit = clk_test_exit,
 338	.test_cases = clk_test_cases,
 339};
 340
 341static int clk_uncached_test_init(struct kunit *test)
 342{
 343	struct clk_dummy_context *ctx;
 344	int ret;
 345
 346	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 347	if (!ctx)
 348		return -ENOMEM;
 349	test->priv = ctx;
 350
 351	ctx->rate = DUMMY_CLOCK_INIT_RATE;
 352	ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
 353					     &clk_dummy_rate_ops,
 354					     CLK_GET_RATE_NOCACHE);
 355
 356	ret = clk_hw_register(NULL, &ctx->hw);
 357	if (ret)
 358		return ret;
 359
 360	return 0;
 361}
 362
 363/*
 364 * Test that for an uncached clock, the clock framework doesn't cache
 365 * the rate and clk_get_rate() will return the underlying clock rate
 366 * even if it changed.
 367 */
 368static void clk_test_uncached_get_rate(struct kunit *test)
 369{
 370	struct clk_dummy_context *ctx = test->priv;
 371	struct clk_hw *hw = &ctx->hw;
 372	struct clk *clk = clk_hw_get_clk(hw, NULL);
 373	unsigned long rate;
 374
 375	rate = clk_get_rate(clk);
 376	KUNIT_ASSERT_GT(test, rate, 0);
 377	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
 378
 379	/* We change the rate behind the clock framework's back */
 380	ctx->rate = DUMMY_CLOCK_RATE_1;
 381	rate = clk_get_rate(clk);
 382	KUNIT_ASSERT_GT(test, rate, 0);
 383	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
 384
 385	clk_put(clk);
 386}
 387
 388/*
 389 * Test that for an uncached clock, clk_set_rate_range() will work
 390 * properly if the rate hasn't changed.
 391 */
 392static void clk_test_uncached_set_range(struct kunit *test)
 393{
 394	struct clk_dummy_context *ctx = test->priv;
 395	struct clk_hw *hw = &ctx->hw;
 396	struct clk *clk = clk_hw_get_clk(hw, NULL);
 397	unsigned long rate;
 398
 399	KUNIT_ASSERT_EQ(test,
 400			clk_set_rate_range(clk,
 401					   DUMMY_CLOCK_RATE_1,
 402					   DUMMY_CLOCK_RATE_2),
 403			0);
 404
 405	rate = clk_get_rate(clk);
 406	KUNIT_ASSERT_GT(test, rate, 0);
 407	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
 408	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
 409
 410	clk_put(clk);
 411}
 412
 413/*
 414 * Test that for an uncached clock, clk_set_rate_range() will work
 415 * properly if the rate has changed in hardware.
 416 *
 417 * In this case, it means that if the rate wasn't initially in the range
 418 * we're trying to set, but got changed at some point into the range
 419 * without the kernel knowing about it, its rate shouldn't be affected.
 420 */
 421static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
 422{
 423	struct clk_dummy_context *ctx = test->priv;
 424	struct clk_hw *hw = &ctx->hw;
 425	struct clk *clk = clk_hw_get_clk(hw, NULL);
 426	unsigned long rate;
 427
 428	/* We change the rate behind the clock framework's back */
 429	ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
 430	KUNIT_ASSERT_EQ(test,
 431			clk_set_rate_range(clk,
 432					   DUMMY_CLOCK_RATE_1,
 433					   DUMMY_CLOCK_RATE_2),
 434			0);
 435
 436	rate = clk_get_rate(clk);
 437	KUNIT_ASSERT_GT(test, rate, 0);
 438	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
 439
 440	clk_put(clk);
 441}
 442
 443static struct kunit_case clk_uncached_test_cases[] = {
 444	KUNIT_CASE(clk_test_uncached_get_rate),
 445	KUNIT_CASE(clk_test_uncached_set_range),
 446	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
 447	{}
 448};
 449
 450/*
 451 * Test suite for a basic, uncached, rate clock, without any parent.
 452 *
 453 * These tests exercise the rate API with simple scenarios
 454 */
 455static struct kunit_suite clk_uncached_test_suite = {
 456	.name = "clk-uncached-test",
 457	.init = clk_uncached_test_init,
 458	.exit = clk_test_exit,
 459	.test_cases = clk_uncached_test_cases,
 460};
 461
 462static int
 463clk_multiple_parents_mux_test_init(struct kunit *test)
 464{
 465	struct clk_multiple_parent_ctx *ctx;
 466	const char *parents[2] = { "parent-0", "parent-1"};
 467	int ret;
 468
 469	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 470	if (!ctx)
 471		return -ENOMEM;
 472	test->priv = ctx;
 473
 474	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
 475							    &clk_dummy_rate_ops,
 476							    0);
 477	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
 478	ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[0].hw);
 479	if (ret)
 480		return ret;
 481
 482	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
 483							    &clk_dummy_rate_ops,
 484							    0);
 485	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
 486	ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
 487	if (ret)
 488		return ret;
 489
 490	ctx->current_parent = 0;
 491	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
 492					   &clk_multiple_parents_mux_ops,
 493					   CLK_SET_RATE_PARENT);
 494	ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
 495	if (ret)
 496		return ret;
 497
 498	return 0;
 499}
 500
 
 
 
 
 
 
 
 
 
 
 501/*
 502 * Test that for a clock with multiple parents, clk_get_parent()
 503 * actually returns the current one.
 504 */
 505static void
 506clk_test_multiple_parents_mux_get_parent(struct kunit *test)
 507{
 508	struct clk_multiple_parent_ctx *ctx = test->priv;
 509	struct clk_hw *hw = &ctx->hw;
 510	struct clk *clk = clk_hw_get_clk(hw, NULL);
 511	struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
 512
 513	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
 514
 515	clk_put(parent);
 516	clk_put(clk);
 517}
 518
 519/*
 520 * Test that for a clock with a multiple parents, clk_has_parent()
 521 * actually reports all of them as parents.
 522 */
 523static void
 524clk_test_multiple_parents_mux_has_parent(struct kunit *test)
 525{
 526	struct clk_multiple_parent_ctx *ctx = test->priv;
 527	struct clk_hw *hw = &ctx->hw;
 528	struct clk *clk = clk_hw_get_clk(hw, NULL);
 529	struct clk *parent;
 530
 531	parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
 532	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
 533	clk_put(parent);
 534
 535	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 536	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
 537	clk_put(parent);
 538
 539	clk_put(clk);
 540}
 541
 542/*
 543 * Test that for a clock with a multiple parents, if we set a range on
 544 * that clock and the parent is changed, its rate after the reparenting
 545 * is still within the range we asked for.
 546 *
 547 * FIXME: clk_set_parent() only does the reparenting but doesn't
 548 * reevaluate whether the new clock rate is within its boundaries or
 549 * not.
 550 */
 551static void
 552clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
 553{
 554	struct clk_multiple_parent_ctx *ctx = test->priv;
 555	struct clk_hw *hw = &ctx->hw;
 556	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
 557	struct clk *parent1, *parent2;
 558	unsigned long rate;
 559	int ret;
 560
 561	kunit_skip(test, "This needs to be fixed in the core.");
 562
 563	parent1 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[0].hw, NULL);
 564	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
 565	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
 566
 567	parent2 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
 568	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
 569
 570	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
 571	KUNIT_ASSERT_EQ(test, ret, 0);
 572
 573	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
 574	KUNIT_ASSERT_EQ(test, ret, 0);
 575
 576	ret = clk_set_rate_range(clk,
 577				 DUMMY_CLOCK_RATE_1 - 1000,
 578				 DUMMY_CLOCK_RATE_1 + 1000);
 579	KUNIT_ASSERT_EQ(test, ret, 0);
 580
 581	ret = clk_set_parent(clk, parent2);
 582	KUNIT_ASSERT_EQ(test, ret, 0);
 583
 584	rate = clk_get_rate(clk);
 585	KUNIT_ASSERT_GT(test, rate, 0);
 586	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
 587	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
 
 
 
 
 588}
 589
 590static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
 591	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
 592	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
 593	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
 594	{}
 595};
 596
 597/*
 598 * Test suite for a basic mux clock with two parents, with
 599 * CLK_SET_RATE_PARENT on the child.
 600 *
 601 * These tests exercise the consumer API and check that the state of the
 602 * child and parents are sane and consistent.
 603 */
 604static struct kunit_suite
 605clk_multiple_parents_mux_test_suite = {
 606	.name = "clk-multiple-parents-mux-test",
 607	.init = clk_multiple_parents_mux_test_init,
 
 608	.test_cases = clk_multiple_parents_mux_test_cases,
 609};
 610
 611static int
 612clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
 613{
 614	struct clk_multiple_parent_ctx *ctx;
 615	const char *parents[2] = { "missing-parent", "proper-parent"};
 616	int ret;
 617
 618	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 619	if (!ctx)
 620		return -ENOMEM;
 621	test->priv = ctx;
 622
 623	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
 624							    &clk_dummy_rate_ops,
 625							    0);
 626	ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
 627	ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
 628	if (ret)
 629		return ret;
 630
 631	ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
 632					   &clk_multiple_parents_mux_ops,
 633					   CLK_SET_RATE_PARENT);
 634	ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
 635	if (ret)
 636		return ret;
 637
 638	return 0;
 639}
 640
 
 
 
 
 
 
 
 
 
 641/*
 642 * Test that, for a mux whose current parent hasn't been registered yet and is
 643 * thus orphan, clk_get_parent() will return NULL.
 644 */
 645static void
 646clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
 647{
 648	struct clk_multiple_parent_ctx *ctx = test->priv;
 649	struct clk_hw *hw = &ctx->hw;
 650	struct clk *clk = clk_hw_get_clk(hw, NULL);
 651
 652	KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
 653
 654	clk_put(clk);
 655}
 656
 657/*
 658 * Test that, for a mux whose current parent hasn't been registered yet,
 659 * calling clk_set_parent() to a valid parent will properly update the
 660 * mux parent and its orphan status.
 661 */
 662static void
 663clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
 664{
 665	struct clk_multiple_parent_ctx *ctx = test->priv;
 666	struct clk_hw *hw = &ctx->hw;
 667	struct clk *clk = clk_hw_get_clk(hw, NULL);
 668	struct clk *parent, *new_parent;
 669	int ret;
 670
 671	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 672	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 673
 674	ret = clk_set_parent(clk, parent);
 675	KUNIT_ASSERT_EQ(test, ret, 0);
 676
 677	new_parent = clk_get_parent(clk);
 678	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 679	KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
 680
 681	clk_put(parent);
 682	clk_put(clk);
 683}
 684
 685/*
 686 * Test that, for a mux that started orphan but got switched to a valid
 687 * parent, calling clk_drop_range() on the mux won't affect the parent
 688 * rate.
 689 */
 690static void
 691clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
 692{
 693	struct clk_multiple_parent_ctx *ctx = test->priv;
 694	struct clk_hw *hw = &ctx->hw;
 695	struct clk *clk = clk_hw_get_clk(hw, NULL);
 696	struct clk *parent;
 697	unsigned long parent_rate, new_parent_rate;
 698	int ret;
 699
 700	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 701	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 702
 703	parent_rate = clk_get_rate(parent);
 704	KUNIT_ASSERT_GT(test, parent_rate, 0);
 705
 706	ret = clk_set_parent(clk, parent);
 707	KUNIT_ASSERT_EQ(test, ret, 0);
 708
 709	ret = clk_drop_range(clk);
 710	KUNIT_ASSERT_EQ(test, ret, 0);
 711
 712	new_parent_rate = clk_get_rate(clk);
 713	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
 714	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
 715
 716	clk_put(parent);
 717	clk_put(clk);
 718}
 719
 720/*
 721 * Test that, for a mux that started orphan but got switched to a valid
 722 * parent, the rate of the mux and its new parent are consistent.
 723 */
 724static void
 725clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
 726{
 727	struct clk_multiple_parent_ctx *ctx = test->priv;
 728	struct clk_hw *hw = &ctx->hw;
 729	struct clk *clk = clk_hw_get_clk(hw, NULL);
 730	struct clk *parent;
 731	unsigned long parent_rate, rate;
 732	int ret;
 733
 734	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 735	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 736
 737	parent_rate = clk_get_rate(parent);
 738	KUNIT_ASSERT_GT(test, parent_rate, 0);
 739
 740	ret = clk_set_parent(clk, parent);
 741	KUNIT_ASSERT_EQ(test, ret, 0);
 742
 743	rate = clk_get_rate(clk);
 744	KUNIT_ASSERT_GT(test, rate, 0);
 745	KUNIT_EXPECT_EQ(test, parent_rate, rate);
 746
 747	clk_put(parent);
 748	clk_put(clk);
 749}
 750
 751/*
 752 * Test that, for a mux that started orphan but got switched to a valid
 753 * parent, calling clk_put() on the mux won't affect the parent rate.
 754 */
 755static void
 756clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
 757{
 758	struct clk_multiple_parent_ctx *ctx = test->priv;
 759	struct clk *clk, *parent;
 760	unsigned long parent_rate, new_parent_rate;
 761	int ret;
 762
 763	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 764	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 765
 766	clk = clk_hw_get_clk(&ctx->hw, NULL);
 767	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
 768
 769	parent_rate = clk_get_rate(parent);
 770	KUNIT_ASSERT_GT(test, parent_rate, 0);
 771
 772	ret = clk_set_parent(clk, parent);
 773	KUNIT_ASSERT_EQ(test, ret, 0);
 774
 775	clk_put(clk);
 776
 777	new_parent_rate = clk_get_rate(parent);
 778	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
 779	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
 780
 781	clk_put(parent);
 782}
 783
 784/*
 785 * Test that, for a mux that started orphan but got switched to a valid
 786 * parent, calling clk_set_rate_range() will affect the parent state if
 787 * its rate is out of range.
 788 */
 789static void
 790clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
 791{
 792	struct clk_multiple_parent_ctx *ctx = test->priv;
 793	struct clk_hw *hw = &ctx->hw;
 794	struct clk *clk = clk_hw_get_clk(hw, NULL);
 795	struct clk *parent;
 796	unsigned long rate;
 797	int ret;
 798
 799	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 800	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 801
 802	ret = clk_set_parent(clk, parent);
 803	KUNIT_ASSERT_EQ(test, ret, 0);
 804
 805	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
 806	KUNIT_ASSERT_EQ(test, ret, 0);
 807
 808	rate = clk_get_rate(clk);
 809	KUNIT_ASSERT_GT(test, rate, 0);
 810	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
 811	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
 812
 813	clk_put(parent);
 814	clk_put(clk);
 815}
 816
 817/*
 818 * Test that, for a mux that started orphan but got switched to a valid
 819 * parent, calling clk_set_rate_range() won't affect the parent state if
 820 * its rate is within range.
 821 */
 822static void
 823clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
 824{
 825	struct clk_multiple_parent_ctx *ctx = test->priv;
 826	struct clk_hw *hw = &ctx->hw;
 827	struct clk *clk = clk_hw_get_clk(hw, NULL);
 828	struct clk *parent;
 829	unsigned long parent_rate, new_parent_rate;
 830	int ret;
 831
 832	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
 833	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 834
 835	parent_rate = clk_get_rate(parent);
 836	KUNIT_ASSERT_GT(test, parent_rate, 0);
 837
 838	ret = clk_set_parent(clk, parent);
 839	KUNIT_ASSERT_EQ(test, ret, 0);
 840
 841	ret = clk_set_rate_range(clk,
 842				 DUMMY_CLOCK_INIT_RATE - 1000,
 843				 DUMMY_CLOCK_INIT_RATE + 1000);
 844	KUNIT_ASSERT_EQ(test, ret, 0);
 845
 846	new_parent_rate = clk_get_rate(parent);
 847	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
 848	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
 849
 850	clk_put(parent);
 851	clk_put(clk);
 852}
 853
 854/*
 855 * Test that, for a mux whose current parent hasn't been registered yet,
 856 * calling clk_set_rate_range() will succeed, and will be taken into
 857 * account when rounding a rate.
 858 */
 859static void
 860clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
 861{
 862	struct clk_multiple_parent_ctx *ctx = test->priv;
 863	struct clk_hw *hw = &ctx->hw;
 864	struct clk *clk = clk_hw_get_clk(hw, NULL);
 865	long rate;
 866	int ret;
 867
 868	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
 869	KUNIT_ASSERT_EQ(test, ret, 0);
 870
 871	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
 872	KUNIT_ASSERT_GT(test, rate, 0);
 873	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
 874	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
 875
 876	clk_put(clk);
 877}
 878
 879/*
 880 * Test that, for a mux that started orphan, was assigned and rate and
 881 * then got switched to a valid parent, its rate is eventually within
 882 * range.
 883 *
 884 * FIXME: Even though we update the rate as part of clk_set_parent(), we
 885 * don't evaluate whether that new rate is within range and needs to be
 886 * adjusted.
 887 */
 888static void
 889clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
 890{
 891	struct clk_multiple_parent_ctx *ctx = test->priv;
 892	struct clk_hw *hw = &ctx->hw;
 893	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
 894	struct clk *parent;
 895	unsigned long rate;
 896	int ret;
 897
 898	kunit_skip(test, "This needs to be fixed in the core.");
 899
 900	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
 901
 902	parent = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
 903	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
 904
 905	ret = clk_set_parent(clk, parent);
 906	KUNIT_ASSERT_EQ(test, ret, 0);
 907
 908	rate = clk_get_rate(clk);
 909	KUNIT_ASSERT_GT(test, rate, 0);
 910	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
 911	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
 
 
 
 912}
 913
 914static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
 915	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
 916	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
 917	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
 918	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
 919	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
 920	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
 921	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
 922	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
 923	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
 924	{}
 925};
 926
 927/*
 928 * Test suite for a basic mux clock with two parents. The default parent
 929 * isn't registered, only the second parent is. By default, the clock
 930 * will thus be orphan.
 931 *
 932 * These tests exercise the behaviour of the consumer API when dealing
 933 * with an orphan clock, and how we deal with the transition to a valid
 934 * parent.
 935 */
 936static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
 937	.name = "clk-orphan-transparent-multiple-parent-mux-test",
 938	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
 
 939	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
 940};
 941
 942struct clk_single_parent_ctx {
 943	struct clk_dummy_context parent_ctx;
 944	struct clk_hw hw;
 945};
 946
 947static int clk_single_parent_mux_test_init(struct kunit *test)
 948{
 949	struct clk_single_parent_ctx *ctx;
 950	int ret;
 951
 952	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
 953	if (!ctx)
 954		return -ENOMEM;
 955	test->priv = ctx;
 956
 957	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
 958	ctx->parent_ctx.hw.init =
 959		CLK_HW_INIT_NO_PARENT("parent-clk",
 960				      &clk_dummy_rate_ops,
 961				      0);
 962
 963	ret = clk_hw_register_kunit(test, NULL, &ctx->parent_ctx.hw);
 964	if (ret)
 965		return ret;
 966
 967	ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
 968				   &clk_dummy_single_parent_ops,
 969				   CLK_SET_RATE_PARENT);
 970
 971	ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
 972	if (ret)
 973		return ret;
 974
 975	return 0;
 976}
 977
 978static void
 979clk_single_parent_mux_test_exit(struct kunit *test)
 980{
 981	struct clk_single_parent_ctx *ctx = test->priv;
 982
 983	clk_hw_unregister(&ctx->hw);
 984	clk_hw_unregister(&ctx->parent_ctx.hw);
 985}
 986
 987/*
 988 * Test that for a clock with a single parent, clk_get_parent() actually
 989 * returns the parent.
 990 */
 991static void
 992clk_test_single_parent_mux_get_parent(struct kunit *test)
 993{
 994	struct clk_single_parent_ctx *ctx = test->priv;
 995	struct clk_hw *hw = &ctx->hw;
 996	struct clk *clk = clk_hw_get_clk(hw, NULL);
 997	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
 998
 999	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
1000
1001	clk_put(parent);
1002	clk_put(clk);
1003}
1004
1005/*
1006 * Test that for a clock with a single parent, clk_has_parent() actually
1007 * reports it as a parent.
1008 */
1009static void
1010clk_test_single_parent_mux_has_parent(struct kunit *test)
1011{
1012	struct clk_single_parent_ctx *ctx = test->priv;
1013	struct clk_hw *hw = &ctx->hw;
1014	struct clk *clk = clk_hw_get_clk(hw, NULL);
1015	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1016
1017	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1018
1019	clk_put(parent);
1020	clk_put(clk);
1021}
1022
1023/*
1024 * Test that for a clock that can't modify its rate and with a single
1025 * parent, if we set disjoints range on the parent and then the child,
1026 * the second will return an error.
1027 *
1028 * FIXME: clk_set_rate_range() only considers the current clock when
1029 * evaluating whether ranges are disjoints and not the upstream clocks
1030 * ranges.
1031 */
1032static void
1033clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1034{
1035	struct clk_single_parent_ctx *ctx = test->priv;
1036	struct clk_hw *hw = &ctx->hw;
1037	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
1038	struct clk *parent;
1039	int ret;
1040
1041	kunit_skip(test, "This needs to be fixed in the core.");
1042
1043	parent = clk_get_parent(clk);
1044	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1045
1046	ret = clk_set_rate_range(parent, 1000, 2000);
1047	KUNIT_ASSERT_EQ(test, ret, 0);
1048
1049	ret = clk_set_rate_range(clk, 3000, 4000);
1050	KUNIT_EXPECT_LT(test, ret, 0);
 
 
1051}
1052
1053/*
1054 * Test that for a clock that can't modify its rate and with a single
1055 * parent, if we set disjoints range on the child and then the parent,
1056 * the second will return an error.
1057 *
1058 * FIXME: clk_set_rate_range() only considers the current clock when
1059 * evaluating whether ranges are disjoints and not the downstream clocks
1060 * ranges.
1061 */
1062static void
1063clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1064{
1065	struct clk_single_parent_ctx *ctx = test->priv;
1066	struct clk_hw *hw = &ctx->hw;
1067	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
1068	struct clk *parent;
1069	int ret;
1070
1071	kunit_skip(test, "This needs to be fixed in the core.");
1072
1073	parent = clk_get_parent(clk);
1074	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1075
1076	ret = clk_set_rate_range(clk, 1000, 2000);
1077	KUNIT_ASSERT_EQ(test, ret, 0);
1078
1079	ret = clk_set_rate_range(parent, 3000, 4000);
1080	KUNIT_EXPECT_LT(test, ret, 0);
 
 
1081}
1082
1083/*
1084 * Test that for a clock that can't modify its rate and with a single
1085 * parent, if we set a range on the parent and then call
1086 * clk_round_rate(), the boundaries of the parent are taken into
1087 * account.
1088 */
1089static void
1090clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1091{
1092	struct clk_single_parent_ctx *ctx = test->priv;
1093	struct clk_hw *hw = &ctx->hw;
1094	struct clk *clk = clk_hw_get_clk(hw, NULL);
1095	struct clk *parent;
1096	long rate;
1097	int ret;
1098
1099	parent = clk_get_parent(clk);
1100	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1101
1102	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1103	KUNIT_ASSERT_EQ(test, ret, 0);
1104
1105	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1106	KUNIT_ASSERT_GT(test, rate, 0);
1107	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1108	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1109
1110	clk_put(clk);
1111}
1112
1113/*
1114 * Test that for a clock that can't modify its rate and with a single
1115 * parent, if we set a range on the parent and a more restrictive one on
1116 * the child, and then call clk_round_rate(), the boundaries of the
1117 * two clocks are taken into account.
1118 */
1119static void
1120clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1121{
1122	struct clk_single_parent_ctx *ctx = test->priv;
1123	struct clk_hw *hw = &ctx->hw;
1124	struct clk *clk = clk_hw_get_clk(hw, NULL);
1125	struct clk *parent;
1126	long rate;
1127	int ret;
1128
1129	parent = clk_get_parent(clk);
1130	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1131
1132	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1133	KUNIT_ASSERT_EQ(test, ret, 0);
1134
1135	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1136	KUNIT_ASSERT_EQ(test, ret, 0);
1137
1138	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1139	KUNIT_ASSERT_GT(test, rate, 0);
1140	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1141	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1142
1143	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1144	KUNIT_ASSERT_GT(test, rate, 0);
1145	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1146	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1147
1148	clk_put(clk);
1149}
1150
1151/*
1152 * Test that for a clock that can't modify its rate and with a single
1153 * parent, if we set a range on the child and a more restrictive one on
1154 * the parent, and then call clk_round_rate(), the boundaries of the
1155 * two clocks are taken into account.
1156 */
1157static void
1158clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1159{
1160	struct clk_single_parent_ctx *ctx = test->priv;
1161	struct clk_hw *hw = &ctx->hw;
1162	struct clk *clk = clk_hw_get_clk(hw, NULL);
1163	struct clk *parent;
1164	long rate;
1165	int ret;
1166
1167	parent = clk_get_parent(clk);
1168	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1169
1170	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1171	KUNIT_ASSERT_EQ(test, ret, 0);
1172
1173	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1174	KUNIT_ASSERT_EQ(test, ret, 0);
1175
1176	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1177	KUNIT_ASSERT_GT(test, rate, 0);
1178	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1179	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1180
1181	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1182	KUNIT_ASSERT_GT(test, rate, 0);
1183	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1184	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1185
1186	clk_put(clk);
1187}
1188
1189static struct kunit_case clk_single_parent_mux_test_cases[] = {
1190	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
1191	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
1192	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
1193	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
1194	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
1195	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
1196	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
1197	{}
1198};
1199
1200/*
1201 * Test suite for a basic mux clock with one parent, with
1202 * CLK_SET_RATE_PARENT on the child.
1203 *
1204 * These tests exercise the consumer API and check that the state of the
1205 * child and parent is sane and consistent.
1206 */
1207static struct kunit_suite
1208clk_single_parent_mux_test_suite = {
1209	.name = "clk-single-parent-mux-test",
1210	.init = clk_single_parent_mux_test_init,
1211	.test_cases = clk_single_parent_mux_test_cases,
1212};
1213
1214static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1215{
1216	struct clk_single_parent_ctx *ctx;
1217	struct clk_init_data init = { };
1218	const char * const parents[] = { "orphan_parent" };
1219	int ret;
1220
1221	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1222	if (!ctx)
1223		return -ENOMEM;
1224	test->priv = ctx;
1225
1226	init.name = "test_orphan_dummy_parent";
1227	init.ops = &clk_dummy_single_parent_ops;
1228	init.parent_names = parents;
1229	init.num_parents = ARRAY_SIZE(parents);
1230	init.flags = CLK_SET_RATE_PARENT;
1231	ctx->hw.init = &init;
1232
1233	ret = clk_hw_register(NULL, &ctx->hw);
1234	if (ret)
1235		return ret;
1236
1237	memset(&init, 0, sizeof(init));
1238	init.name = "orphan_parent";
1239	init.ops = &clk_dummy_rate_ops;
1240	ctx->parent_ctx.hw.init = &init;
1241	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1242
1243	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1244	if (ret)
1245		return ret;
1246
1247	return 0;
1248}
1249
1250/*
1251 * Test that a mux-only clock, with an initial rate within a range,
1252 * will still have the same rate after the range has been enforced.
1253 *
1254 * See:
1255 * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1256 */
1257static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1258{
1259	struct clk_single_parent_ctx *ctx = test->priv;
1260	struct clk_hw *hw = &ctx->hw;
1261	struct clk *clk = clk_hw_get_clk(hw, NULL);
1262	unsigned long rate, new_rate;
1263
1264	rate = clk_get_rate(clk);
1265	KUNIT_ASSERT_GT(test, rate, 0);
1266
1267	KUNIT_ASSERT_EQ(test,
1268			clk_set_rate_range(clk,
1269					   ctx->parent_ctx.rate - 1000,
1270					   ctx->parent_ctx.rate + 1000),
1271			0);
1272
1273	new_rate = clk_get_rate(clk);
1274	KUNIT_ASSERT_GT(test, new_rate, 0);
1275	KUNIT_EXPECT_EQ(test, rate, new_rate);
1276
1277	clk_put(clk);
1278}
1279
1280static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
1281	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
1282	{}
1283};
1284
1285/*
1286 * Test suite for a basic mux clock with one parent. The parent is
1287 * registered after its child. The clock will thus be an orphan when
1288 * registered, but will no longer be one by the time the tests run.
1289 *
1290 * These tests make sure a clock that used to be an orphan has a sane,
1291 * consistent behaviour.
1292 */
1293static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
1294	.name = "clk-orphan-transparent-single-parent-test",
1295	.init = clk_orphan_transparent_single_parent_mux_test_init,
1296	.exit = clk_single_parent_mux_test_exit,
1297	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
1298};
1299
1300struct clk_single_parent_two_lvl_ctx {
1301	struct clk_dummy_context parent_parent_ctx;
1302	struct clk_dummy_context parent_ctx;
1303	struct clk_hw hw;
1304};
1305
1306static int
1307clk_orphan_two_level_root_last_test_init(struct kunit *test)
1308{
1309	struct clk_single_parent_two_lvl_ctx *ctx;
1310	int ret;
1311
1312	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1313	if (!ctx)
1314		return -ENOMEM;
1315	test->priv = ctx;
1316
1317	ctx->parent_ctx.hw.init =
1318		CLK_HW_INIT("intermediate-parent",
1319			    "root-parent",
1320			    &clk_dummy_single_parent_ops,
1321			    CLK_SET_RATE_PARENT);
1322	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1323	if (ret)
1324		return ret;
1325
1326	ctx->hw.init =
1327		CLK_HW_INIT("test-clk", "intermediate-parent",
1328			    &clk_dummy_single_parent_ops,
1329			    CLK_SET_RATE_PARENT);
1330	ret = clk_hw_register(NULL, &ctx->hw);
1331	if (ret)
1332		return ret;
1333
1334	ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1335	ctx->parent_parent_ctx.hw.init =
1336		CLK_HW_INIT_NO_PARENT("root-parent",
1337				      &clk_dummy_rate_ops,
1338				      0);
1339	ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1340	if (ret)
1341		return ret;
1342
1343	return 0;
1344}
1345
1346static void
1347clk_orphan_two_level_root_last_test_exit(struct kunit *test)
1348{
1349	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1350
1351	clk_hw_unregister(&ctx->hw);
1352	clk_hw_unregister(&ctx->parent_ctx.hw);
1353	clk_hw_unregister(&ctx->parent_parent_ctx.hw);
1354}
1355
1356/*
1357 * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1358 * will return the proper rate.
1359 */
1360static void
1361clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1362{
1363	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1364	struct clk_hw *hw = &ctx->hw;
1365	struct clk *clk = clk_hw_get_clk(hw, NULL);
1366	unsigned long rate;
1367
1368	rate = clk_get_rate(clk);
1369	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1370
1371	clk_put(clk);
1372}
1373
1374/*
1375 * Test that, for a clock whose parent used to be orphan,
1376 * clk_set_rate_range() won't affect its rate if it is already within
1377 * range.
1378 *
1379 * See (for Exynos 4210):
1380 * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1381 */
1382static void
1383clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1384{
1385	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1386	struct clk_hw *hw = &ctx->hw;
1387	struct clk *clk = clk_hw_get_clk(hw, NULL);
1388	unsigned long rate;
1389	int ret;
1390
1391	ret = clk_set_rate_range(clk,
1392				 DUMMY_CLOCK_INIT_RATE - 1000,
1393				 DUMMY_CLOCK_INIT_RATE + 1000);
1394	KUNIT_ASSERT_EQ(test, ret, 0);
1395
1396	rate = clk_get_rate(clk);
1397	KUNIT_ASSERT_GT(test, rate, 0);
1398	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1399
1400	clk_put(clk);
1401}
1402
1403static struct kunit_case
1404clk_orphan_two_level_root_last_test_cases[] = {
1405	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
1406	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
1407	{}
1408};
1409
1410/*
1411 * Test suite for a basic, transparent, clock with a parent that is also
1412 * such a clock. The parent's parent is registered last, while the
1413 * parent and its child are registered in that order. The intermediate
1414 * and leaf clocks will thus be orphans when registered, but the leaf
1415 * clock itself will always have its parent and will never be
1416 * reparented. Indeed, it's only an orphan because its parent is.
1417 *
1418 * These tests exercise the behaviour of the consumer API when dealing
1419 * with an orphan clock, and how we deal with the transition to a valid
1420 * parent.
1421 */
1422static struct kunit_suite
1423clk_orphan_two_level_root_last_test_suite = {
1424	.name = "clk-orphan-two-level-root-last-test",
1425	.init = clk_orphan_two_level_root_last_test_init,
1426	.exit = clk_orphan_two_level_root_last_test_exit,
1427	.test_cases = clk_orphan_two_level_root_last_test_cases,
1428};
1429
1430/*
1431 * Test that clk_set_rate_range won't return an error for a valid range
1432 * and that it will make sure the rate of the clock is within the
1433 * boundaries.
1434 */
1435static void clk_range_test_set_range(struct kunit *test)
1436{
1437	struct clk_dummy_context *ctx = test->priv;
1438	struct clk_hw *hw = &ctx->hw;
1439	struct clk *clk = clk_hw_get_clk(hw, NULL);
1440	unsigned long rate;
1441
1442	KUNIT_ASSERT_EQ(test,
1443			clk_set_rate_range(clk,
1444					   DUMMY_CLOCK_RATE_1,
1445					   DUMMY_CLOCK_RATE_2),
1446			0);
1447
1448	rate = clk_get_rate(clk);
1449	KUNIT_ASSERT_GT(test, rate, 0);
1450	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1451	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1452
1453	clk_put(clk);
1454}
1455
1456/*
1457 * Test that calling clk_set_rate_range with a minimum rate higher than
1458 * the maximum rate returns an error.
1459 */
1460static void clk_range_test_set_range_invalid(struct kunit *test)
1461{
1462	struct clk_dummy_context *ctx = test->priv;
1463	struct clk_hw *hw = &ctx->hw;
1464	struct clk *clk = clk_hw_get_clk(hw, NULL);
1465
1466	KUNIT_EXPECT_LT(test,
1467			clk_set_rate_range(clk,
1468					   DUMMY_CLOCK_RATE_1 + 1000,
1469					   DUMMY_CLOCK_RATE_1),
1470			0);
1471
1472	clk_put(clk);
1473}
1474
1475/*
1476 * Test that users can't set multiple disjoint ranges that would be
1477 * impossible to meet.
1478 */
1479static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1480{
1481	struct clk_dummy_context *ctx = test->priv;
1482	struct clk_hw *hw = &ctx->hw;
1483	struct clk *user1, *user2;
1484
1485	user1 = clk_hw_get_clk(hw, NULL);
1486	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1487
1488	user2 = clk_hw_get_clk(hw, NULL);
1489	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1490
1491	KUNIT_ASSERT_EQ(test,
1492			clk_set_rate_range(user1, 1000, 2000),
1493			0);
1494
1495	KUNIT_EXPECT_LT(test,
1496			clk_set_rate_range(user2, 3000, 4000),
1497			0);
1498
1499	clk_put(user2);
1500	clk_put(user1);
1501}
1502
1503/*
1504 * Test that if our clock has some boundaries and we try to round a rate
1505 * lower than the minimum, the returned rate will be within range.
1506 */
1507static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1508{
1509	struct clk_dummy_context *ctx = test->priv;
1510	struct clk_hw *hw = &ctx->hw;
1511	struct clk *clk = clk_hw_get_clk(hw, NULL);
1512	long rate;
1513
1514	KUNIT_ASSERT_EQ(test,
1515			clk_set_rate_range(clk,
1516					   DUMMY_CLOCK_RATE_1,
1517					   DUMMY_CLOCK_RATE_2),
1518			0);
1519
1520	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1521	KUNIT_ASSERT_GT(test, rate, 0);
1522	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1523	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1524
1525	clk_put(clk);
1526}
1527
1528/*
1529 * Test that if our clock has some boundaries and we try to set a rate
1530 * lower than the minimum, the new rate will be within range.
1531 */
1532static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1533{
1534	struct clk_dummy_context *ctx = test->priv;
1535	struct clk_hw *hw = &ctx->hw;
1536	struct clk *clk = clk_hw_get_clk(hw, NULL);
1537	unsigned long rate;
1538
1539	KUNIT_ASSERT_EQ(test,
1540			clk_set_rate_range(clk,
1541					   DUMMY_CLOCK_RATE_1,
1542					   DUMMY_CLOCK_RATE_2),
1543			0);
1544
1545	KUNIT_ASSERT_EQ(test,
1546			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1547			0);
1548
1549	rate = clk_get_rate(clk);
1550	KUNIT_ASSERT_GT(test, rate, 0);
1551	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1552	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1553
1554	clk_put(clk);
1555}
1556
1557/*
1558 * Test that if our clock has some boundaries and we try to round and
1559 * set a rate lower than the minimum, the rate returned by
1560 * clk_round_rate() will be consistent with the new rate set by
1561 * clk_set_rate().
1562 */
1563static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1564{
1565	struct clk_dummy_context *ctx = test->priv;
1566	struct clk_hw *hw = &ctx->hw;
1567	struct clk *clk = clk_hw_get_clk(hw, NULL);
1568	long rounded;
1569
1570	KUNIT_ASSERT_EQ(test,
1571			clk_set_rate_range(clk,
1572					   DUMMY_CLOCK_RATE_1,
1573					   DUMMY_CLOCK_RATE_2),
1574			0);
1575
1576	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1577	KUNIT_ASSERT_GT(test, rounded, 0);
1578
1579	KUNIT_ASSERT_EQ(test,
1580			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1581			0);
1582
1583	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1584
1585	clk_put(clk);
1586}
1587
1588/*
1589 * Test that if our clock has some boundaries and we try to round a rate
1590 * higher than the maximum, the returned rate will be within range.
1591 */
1592static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1593{
1594	struct clk_dummy_context *ctx = test->priv;
1595	struct clk_hw *hw = &ctx->hw;
1596	struct clk *clk = clk_hw_get_clk(hw, NULL);
1597	long rate;
1598
1599	KUNIT_ASSERT_EQ(test,
1600			clk_set_rate_range(clk,
1601					   DUMMY_CLOCK_RATE_1,
1602					   DUMMY_CLOCK_RATE_2),
1603			0);
1604
1605	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1606	KUNIT_ASSERT_GT(test, rate, 0);
1607	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1608	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1609
1610	clk_put(clk);
1611}
1612
1613/*
1614 * Test that if our clock has some boundaries and we try to set a rate
1615 * higher than the maximum, the new rate will be within range.
1616 */
1617static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1618{
1619	struct clk_dummy_context *ctx = test->priv;
1620	struct clk_hw *hw = &ctx->hw;
1621	struct clk *clk = clk_hw_get_clk(hw, NULL);
1622	unsigned long rate;
1623
1624	KUNIT_ASSERT_EQ(test,
1625			clk_set_rate_range(clk,
1626					   DUMMY_CLOCK_RATE_1,
1627					   DUMMY_CLOCK_RATE_2),
1628			0);
1629
1630	KUNIT_ASSERT_EQ(test,
1631			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1632			0);
1633
1634	rate = clk_get_rate(clk);
1635	KUNIT_ASSERT_GT(test, rate, 0);
1636	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1637	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1638
1639	clk_put(clk);
1640}
1641
1642/*
1643 * Test that if our clock has some boundaries and we try to round and
1644 * set a rate higher than the maximum, the rate returned by
1645 * clk_round_rate() will be consistent with the new rate set by
1646 * clk_set_rate().
1647 */
1648static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1649{
1650	struct clk_dummy_context *ctx = test->priv;
1651	struct clk_hw *hw = &ctx->hw;
1652	struct clk *clk = clk_hw_get_clk(hw, NULL);
1653	long rounded;
1654
1655	KUNIT_ASSERT_EQ(test,
1656			clk_set_rate_range(clk,
1657					   DUMMY_CLOCK_RATE_1,
1658					   DUMMY_CLOCK_RATE_2),
1659			0);
1660
1661	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1662	KUNIT_ASSERT_GT(test, rounded, 0);
1663
1664	KUNIT_ASSERT_EQ(test,
1665			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1666			0);
1667
1668	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1669
1670	clk_put(clk);
1671}
1672
1673/*
1674 * Test that if our clock has a rate lower than the minimum set by a
1675 * call to clk_set_rate_range(), the rate will be raised to match the
1676 * new minimum.
1677 *
1678 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1679 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1680 */
1681static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1682{
1683	struct clk_dummy_context *ctx = test->priv;
1684	struct clk_hw *hw = &ctx->hw;
1685	struct clk *clk = clk_hw_get_clk(hw, NULL);
1686	unsigned long rate;
1687
1688	KUNIT_ASSERT_EQ(test,
1689			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1690			0);
1691
1692	KUNIT_ASSERT_EQ(test,
1693			clk_set_rate_range(clk,
1694					   DUMMY_CLOCK_RATE_1,
1695					   DUMMY_CLOCK_RATE_2),
1696			0);
1697
1698	rate = clk_get_rate(clk);
1699	KUNIT_ASSERT_GT(test, rate, 0);
1700	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1701
1702	clk_put(clk);
1703}
1704
1705/*
1706 * Test that if our clock has a rate higher than the maximum set by a
1707 * call to clk_set_rate_range(), the rate will be lowered to match the
1708 * new maximum.
1709 *
1710 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1711 * modify the requested rate, which is our case in clk_dummy_rate_ops.
1712 */
1713static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1714{
1715	struct clk_dummy_context *ctx = test->priv;
1716	struct clk_hw *hw = &ctx->hw;
1717	struct clk *clk = clk_hw_get_clk(hw, NULL);
1718	unsigned long rate;
1719
1720	KUNIT_ASSERT_EQ(test,
1721			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1722			0);
1723
1724	KUNIT_ASSERT_EQ(test,
1725			clk_set_rate_range(clk,
1726					   DUMMY_CLOCK_RATE_1,
1727					   DUMMY_CLOCK_RATE_2),
1728			0);
1729
1730	rate = clk_get_rate(clk);
1731	KUNIT_ASSERT_GT(test, rate, 0);
1732	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1733
1734	clk_put(clk);
1735}
1736
1737static struct kunit_case clk_range_test_cases[] = {
1738	KUNIT_CASE(clk_range_test_set_range),
1739	KUNIT_CASE(clk_range_test_set_range_invalid),
1740	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
1741	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
1742	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
1743	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
1744	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
1745	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
1746	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
1747	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
1748	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
1749	{}
1750};
1751
1752/*
1753 * Test suite for a basic rate clock, without any parent.
1754 *
1755 * These tests exercise the rate range API: clk_set_rate_range(),
1756 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1757 */
1758static struct kunit_suite clk_range_test_suite = {
1759	.name = "clk-range-test",
1760	.init = clk_test_init,
1761	.exit = clk_test_exit,
1762	.test_cases = clk_range_test_cases,
1763};
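
/*
 * Illustrative sketch, not wired into clk_range_test_cases[]: the
 * clk_set_min_rate(), clk_set_max_rate() and clk_drop_range() helpers
 * named in the suite comment above act on the same per-user boundaries
 * as clk_set_rate_range(), so a consumer would typically use them like
 * this (assuming the clk_dummy_context set up by clk_test_init()).
 */
static void __maybe_unused clk_range_example_min_max_drop(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk *clk = clk_hw_get_clk(&ctx->hw, NULL);

	/* Raise only the lower boundary for this user... */
	KUNIT_EXPECT_EQ(test, clk_set_min_rate(clk, DUMMY_CLOCK_RATE_1), 0);

	/* ...then cap the upper boundary as well... */
	KUNIT_EXPECT_EQ(test, clk_set_max_rate(clk, DUMMY_CLOCK_RATE_2), 0);

	/* ...and finally release both boundaries again. */
	KUNIT_EXPECT_EQ(test, clk_drop_range(clk), 0);

	clk_put(clk);
}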
1764
1765/*
1766 * Test that if we have several subsequent calls to
1767 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1768 * needed each and every time.
1769 *
1770 * With clk_dummy_maximize_rate_ops, this means that the rate will
1771 * trail along the maximum as it evolves.
1772 */
1773static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1774{
1775	struct clk_dummy_context *ctx = test->priv;
1776	struct clk_hw *hw = &ctx->hw;
1777	struct clk *clk = clk_hw_get_clk(hw, NULL);
1778	unsigned long rate;
1779
1780	KUNIT_ASSERT_EQ(test,
1781			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1782			0);
1783
1784	KUNIT_ASSERT_EQ(test,
1785			clk_set_rate_range(clk,
1786					   DUMMY_CLOCK_RATE_1,
1787					   DUMMY_CLOCK_RATE_2),
1788			0);
1789
1790	rate = clk_get_rate(clk);
1791	KUNIT_ASSERT_GT(test, rate, 0);
1792	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1793
1794	KUNIT_ASSERT_EQ(test,
1795			clk_set_rate_range(clk,
1796					   DUMMY_CLOCK_RATE_1,
1797					   DUMMY_CLOCK_RATE_2 - 1000),
1798			0);
1799
1800	rate = clk_get_rate(clk);
1801	KUNIT_ASSERT_GT(test, rate, 0);
1802	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1803
1804	KUNIT_ASSERT_EQ(test,
1805			clk_set_rate_range(clk,
1806					   DUMMY_CLOCK_RATE_1,
1807					   DUMMY_CLOCK_RATE_2),
1808			0);
1809
1810	rate = clk_get_rate(clk);
1811	KUNIT_ASSERT_GT(test, rate, 0);
1812	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1813
1814	clk_put(clk);
1815}
1816
1817/*
1818 * Test that if we have several subsequent calls to
1819 * clk_set_rate_range(), across multiple users, the core will reevaluate
1820 * whether a new rate is needed each and every time.
1821 *
1822 * With clk_dummy_maximize_rate_ops, this means that the rate will
1823 * trail along the maximum as it evolves.
1824 */
1825static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1826{
1827	struct clk_dummy_context *ctx = test->priv;
1828	struct clk_hw *hw = &ctx->hw;
1829	struct clk *clk = clk_hw_get_clk(hw, NULL);
1830	struct clk *user1, *user2;
1831	unsigned long rate;
1832
1833	user1 = clk_hw_get_clk(hw, NULL);
1834	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1835
1836	user2 = clk_hw_get_clk(hw, NULL);
1837	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1838
1839	KUNIT_ASSERT_EQ(test,
1840			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1841			0);
1842
1843	KUNIT_ASSERT_EQ(test,
1844			clk_set_rate_range(user1,
1845					   0,
1846					   DUMMY_CLOCK_RATE_2),
1847			0);
1848
1849	rate = clk_get_rate(clk);
1850	KUNIT_ASSERT_GT(test, rate, 0);
1851	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1852
1853	KUNIT_ASSERT_EQ(test,
1854			clk_set_rate_range(user2,
1855					   0,
1856					   DUMMY_CLOCK_RATE_1),
1857			0);
1858
1859	rate = clk_get_rate(clk);
1860	KUNIT_ASSERT_GT(test, rate, 0);
1861	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1862
1863	KUNIT_ASSERT_EQ(test,
1864			clk_drop_range(user2),
1865			0);
1866
1867	rate = clk_get_rate(clk);
1868	KUNIT_ASSERT_GT(test, rate, 0);
1869	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1870
1871	clk_put(user2);
1872	clk_put(user1);
1873	clk_put(clk);
1874}
1875
1876/*
1877 * Test that if we have several subsequent calls to
1878 * clk_set_rate_range(), across multiple users, the core will reevaluate
1879 * whether a new rate is needed, including when a user drops its clock.
1880 *
1881 * With clk_dummy_maximize_rate_ops, this means that the rate will
1882 * trail along the maximum as it evolves.
1883 */
1884static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1885{
1886	struct clk_dummy_context *ctx = test->priv;
1887	struct clk_hw *hw = &ctx->hw;
1888	struct clk *clk = clk_hw_get_clk(hw, NULL);
1889	struct clk *user1, *user2;
1890	unsigned long rate;
1891
1892	user1 = clk_hw_get_clk(hw, NULL);
1893	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1894
1895	user2 = clk_hw_get_clk(hw, NULL);
1896	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1897
1898	KUNIT_ASSERT_EQ(test,
1899			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1900			0);
1901
1902	KUNIT_ASSERT_EQ(test,
1903			clk_set_rate_range(user1,
1904					   0,
1905					   DUMMY_CLOCK_RATE_2),
1906			0);
1907
1908	rate = clk_get_rate(clk);
1909	KUNIT_ASSERT_GT(test, rate, 0);
1910	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1911
1912	KUNIT_ASSERT_EQ(test,
1913			clk_set_rate_range(user2,
1914					   0,
1915					   DUMMY_CLOCK_RATE_1),
1916			0);
1917
1918	rate = clk_get_rate(clk);
1919	KUNIT_ASSERT_GT(test, rate, 0);
1920	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1921
1922	clk_put(user2);
1923
1924	rate = clk_get_rate(clk);
1925	KUNIT_ASSERT_GT(test, rate, 0);
1926	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1927
1928	clk_put(user1);
1929	clk_put(clk);
1930}
1931
1932static struct kunit_case clk_range_maximize_test_cases[] = {
1933	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
1934	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
1935	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
1936	{}
1937};
1938
1939/*
1940 * Test suite for a basic rate clock, without any parent.
1941 *
1942 * These tests exercise the rate range API: clk_set_rate_range(),
1943 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1944 * driver that will always try to run at the highest possible rate.
1945 */
1946static struct kunit_suite clk_range_maximize_test_suite = {
1947	.name = "clk-range-maximize-test",
1948	.init = clk_maximize_test_init,
1949	.exit = clk_test_exit,
1950	.test_cases = clk_range_maximize_test_cases,
1951};
1952
1953/*
1954 * Test that if we have several subsequent calls to
1955 * clk_set_rate_range(), the core will reevaluate whether a new rate is
1956 * needed each and every time.
1957 *
1958 * With clk_dummy_minimize_rate_ops, this means that the rate will
1959 * trail along the minimum as it evolves.
1960 */
1961static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1962{
1963	struct clk_dummy_context *ctx = test->priv;
1964	struct clk_hw *hw = &ctx->hw;
1965	struct clk *clk = clk_hw_get_clk(hw, NULL);
1966	unsigned long rate;
1967
1968	KUNIT_ASSERT_EQ(test,
1969			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1970			0);
1971
1972	KUNIT_ASSERT_EQ(test,
1973			clk_set_rate_range(clk,
1974					   DUMMY_CLOCK_RATE_1,
1975					   DUMMY_CLOCK_RATE_2),
1976			0);
1977
1978	rate = clk_get_rate(clk);
1979	KUNIT_ASSERT_GT(test, rate, 0);
1980	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1981
1982	KUNIT_ASSERT_EQ(test,
1983			clk_set_rate_range(clk,
1984					   DUMMY_CLOCK_RATE_1 + 1000,
1985					   DUMMY_CLOCK_RATE_2),
1986			0);
1987
1988	rate = clk_get_rate(clk);
1989	KUNIT_ASSERT_GT(test, rate, 0);
1990	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1991
1992	KUNIT_ASSERT_EQ(test,
1993			clk_set_rate_range(clk,
1994					   DUMMY_CLOCK_RATE_1,
1995					   DUMMY_CLOCK_RATE_2),
1996			0);
1997
1998	rate = clk_get_rate(clk);
1999	KUNIT_ASSERT_GT(test, rate, 0);
2000	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2001
2002	clk_put(clk);
2003}
2004
2005/*
2006 * Test that if we have several subsequent calls to
2007 * clk_set_rate_range(), across multiple users, the core will reevaluate
2008 * whether a new rate is needed each and every time.
2009 *
2010 * With clk_dummy_minimize_rate_ops, this means that the rate will
2011 * trail along the minimum as it evolves.
2012 */
2013static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2014{
2015	struct clk_dummy_context *ctx = test->priv;
2016	struct clk_hw *hw = &ctx->hw;
2017	struct clk *clk = clk_hw_get_clk(hw, NULL);
2018	struct clk *user1, *user2;
2019	unsigned long rate;
2020
2021	user1 = clk_hw_get_clk(hw, NULL);
2022	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2023
2024	user2 = clk_hw_get_clk(hw, NULL);
2025	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2026
2027	KUNIT_ASSERT_EQ(test,
2028			clk_set_rate_range(user1,
2029					   DUMMY_CLOCK_RATE_1,
2030					   ULONG_MAX),
2031			0);
2032
2033	rate = clk_get_rate(clk);
2034	KUNIT_ASSERT_GT(test, rate, 0);
2035	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2036
2037	KUNIT_ASSERT_EQ(test,
2038			clk_set_rate_range(user2,
2039					   DUMMY_CLOCK_RATE_2,
2040					   ULONG_MAX),
2041			0);
2042
2043	rate = clk_get_rate(clk);
2044	KUNIT_ASSERT_GT(test, rate, 0);
2045	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2046
2047	KUNIT_ASSERT_EQ(test,
2048			clk_drop_range(user2),
2049			0);
2050
2051	rate = clk_get_rate(clk);
2052	KUNIT_ASSERT_GT(test, rate, 0);
2053	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2054
2055	clk_put(user2);
2056	clk_put(user1);
2057	clk_put(clk);
2058}
2059
2060/*
2061 * Test that if we have several subsequent calls to
2062 * clk_set_rate_range(), across multiple users, the core will reevaluate
2063 * whether a new rate is needed, including when a user drops its clock.
2064 *
2065 * With clk_dummy_minimize_rate_ops, this means that the rate will
2066 * trail along the minimum as it evolves.
2067 */
2068static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
2069{
2070	struct clk_dummy_context *ctx = test->priv;
2071	struct clk_hw *hw = &ctx->hw;
2072	struct clk *clk = clk_hw_get_clk(hw, NULL);
2073	struct clk *user1, *user2;
2074	unsigned long rate;
2075
2076	user1 = clk_hw_get_clk(hw, NULL);
2077	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2078
2079	user2 = clk_hw_get_clk(hw, NULL);
2080	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2081
2082	KUNIT_ASSERT_EQ(test,
2083			clk_set_rate_range(user1,
2084					   DUMMY_CLOCK_RATE_1,
2085					   ULONG_MAX),
2086			0);
2087
2088	rate = clk_get_rate(clk);
2089	KUNIT_ASSERT_GT(test, rate, 0);
2090	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2091
2092	KUNIT_ASSERT_EQ(test,
2093			clk_set_rate_range(user2,
2094					   DUMMY_CLOCK_RATE_2,
2095					   ULONG_MAX),
2096			0);
2097
2098	rate = clk_get_rate(clk);
2099	KUNIT_ASSERT_GT(test, rate, 0);
2100	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2101
2102	clk_put(user2);
2103
2104	rate = clk_get_rate(clk);
2105	KUNIT_ASSERT_GT(test, rate, 0);
2106	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2107
2108	clk_put(user1);
2109	clk_put(clk);
2110}
2111
2112static struct kunit_case clk_range_minimize_test_cases[] = {
2113	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
2114	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
2115	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
2116	{}
2117};
2118
2119/*
2120 * Test suite for a basic rate clock, without any parent.
2121 *
2122 * These tests exercise the rate range API: clk_set_rate_range(),
2123 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
2124 * driver that will always try to run at the lowest possible rate.
2125 */
2126static struct kunit_suite clk_range_minimize_test_suite = {
2127	.name = "clk-range-minimize-test",
2128	.init = clk_minimize_test_init,
2129	.exit = clk_test_exit,
2130	.test_cases = clk_range_minimize_test_cases,
2131};
2132
2133struct clk_leaf_mux_ctx {
2134	struct clk_multiple_parent_ctx mux_ctx;
2135	struct clk_hw hw;
2136	struct clk_hw parent;
2137	struct clk_rate_request *req;
2138	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2139};
2140
2141static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
2142{
2143	struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw);
2144	int ret;
2145	struct clk_rate_request *parent_req = ctx->req;
2146
2147	clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate);
2148	ret = ctx->determine_rate_func(req->best_parent_hw, parent_req);
2149	if (ret)
2150		return ret;
2151
2152	req->rate = parent_req->rate;
2153
2154	return 0;
2155}
2156
2157static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = {
2158	.determine_rate = clk_leaf_mux_determine_rate,
2159	.set_parent = clk_dummy_single_set_parent,
2160	.get_parent = clk_dummy_single_get_parent,
2161};
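
/*
 * Illustrative sketch, not wired into any suite: instead of forwarding
 * the incoming request as clk_leaf_mux_determine_rate() does above, a
 * determine_rate implementation can also build a fresh request for its
 * parent with clk_hw_init_rate_request(), which seeds the request with
 * the parent's own boundaries before handing it to
 * __clk_determine_rate().
 */
static int __maybe_unused
clk_leaf_mux_example_query_parent(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	struct clk_rate_request parent_req;
	int ret;

	if (!parent)
		return -EINVAL;

	clk_hw_init_rate_request(parent, &parent_req, req->rate);

	ret = __clk_determine_rate(parent, &parent_req);
	if (ret)
		return ret;

	req->best_parent_hw = parent;
	req->best_parent_rate = parent_req.rate;
	req->rate = parent_req.rate;

	return 0;
}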
2162
2163static int
2164clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
2165{
2166	struct clk_leaf_mux_ctx *ctx;
2167	const char *top_parents[2] = { "parent-0", "parent-1" };
2168	int ret;
2169
2170	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2171	if (!ctx)
2172		return -ENOMEM;
2173	test->priv = ctx;
2174
2175	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2176								    &clk_dummy_rate_ops,
2177								    0);
2178	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2179	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2180	if (ret)
2181		return ret;
2182
2183	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2184								    &clk_dummy_rate_ops,
2185								    0);
2186	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2187	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2188	if (ret)
2189		return ret;
2190
2191	ctx->mux_ctx.current_parent = 0;
2192	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2193						   &clk_multiple_parents_mux_ops,
2194						   0);
2195	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2196	if (ret)
2197		return ret;
2198
2199	ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw,
2200					  &empty_clk_ops, CLK_SET_RATE_PARENT);
2201	ret = clk_hw_register(NULL, &ctx->parent);
2202	if (ret)
2203		return ret;
2204
2205	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent,
2206				      &clk_leaf_mux_set_rate_parent_ops,
2207				      CLK_SET_RATE_PARENT);
2208	ret = clk_hw_register(NULL, &ctx->hw);
2209	if (ret)
2210		return ret;
2211
2212	return 0;
2213}
2214
2215static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
2216{
2217	struct clk_leaf_mux_ctx *ctx = test->priv;
2218
2219	clk_hw_unregister(&ctx->hw);
2220	clk_hw_unregister(&ctx->parent);
2221	clk_hw_unregister(&ctx->mux_ctx.hw);
2222	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2223	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2224}
2225
2226struct clk_leaf_mux_set_rate_parent_determine_rate_test_case {
2227	const char *desc;
2228	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2229};
2230
2231static void
2232clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc(
2233		const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc)
2234{
2235	strcpy(desc, t->desc);
2236}
2237
2238static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case
2239clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = {
2240	{
2241		/*
2242		 * Test that __clk_determine_rate() on the parent that can't
2243		 * change rate doesn't return a clk_rate_request structure with
2244		 * the best_parent_hw pointer pointing to the parent.
2245		 */
2246		.desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent",
2247		.determine_rate_func = __clk_determine_rate,
2248	},
2249	{
2250		/*
2251		 * Test that __clk_mux_determine_rate() on the parent that
2252		 * can't change rate doesn't return a clk_rate_request
2253		 * structure with the best_parent_hw pointer pointing to
2254		 * the parent.
2255		 */
2256		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent",
2257		.determine_rate_func = __clk_mux_determine_rate,
2258	},
2259	{
2260		/*
2261		 * Test that __clk_mux_determine_rate_closest() on the parent
2262		 * that can't change rate doesn't return a clk_rate_request
2263		 * structure with the best_parent_hw pointer pointing to
2264		 * the parent.
2265		 */
2266		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent",
2267		.determine_rate_func = __clk_mux_determine_rate_closest,
2268	},
2269	{
2270		/*
2271		 * Test that clk_hw_determine_rate_no_reparent() on the parent
2272		 * that can't change rate doesn't return a clk_rate_request
2273		 * structure with the best_parent_hw pointer pointing to
2274		 * the parent.
2275		 */
2276		.desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent",
2277		.determine_rate_func = clk_hw_determine_rate_no_reparent,
2278	},
2279};
2280
2281KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2282		  clk_leaf_mux_set_rate_parent_determine_rate_test_cases,
2283		  clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc)
2284
2285/*
2286 * Test that when a clk that can't change rate itself calls a function like
2287 * __clk_determine_rate() on its parent, it doesn't get back a clk_rate_request
2288 * structure that has the best_parent_hw pointer point to the clk_hw passed
2289 * into the determine rate function. See commit 262ca38f4b6e ("clk: Stop
2290 * forwarding clk_rate_requests to the parent") for more background.
2291 */
2292static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test)
2293{
2294	struct clk_leaf_mux_ctx *ctx = test->priv;
2295	struct clk_hw *hw = &ctx->hw;
2296	struct clk *clk = clk_hw_get_clk(hw, NULL);
2297	struct clk_rate_request req;
2298	unsigned long rate;
2299	const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param;
2300
2301	test_param = test->param_value;
2302	ctx->determine_rate_func = test_param->determine_rate_func;
2303
2304	ctx->req = &req;
2305	rate = clk_get_rate(clk);
2306	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2307	KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2));
2308
2309	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
2310	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
2311	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
2312
2313	clk_put(clk);
2314}
2315
2316static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
2317	KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2318			 clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params),
2319	{}
2320};
2321
2322/*
2323 * Test suite for a clock whose parent is a pass-through clk whose parent is a
2324 * mux with multiple parents. The leaf and pass-through clocks have the
2325 * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which
2326 * will then select which parent is the best fit for a given rate.
2327 *
2328 * These tests exercise the behaviour of muxes, and the proper selection
2329 * of parents.
2330 */
2331static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
2332	.name = "clk-leaf-mux-set-rate-parent",
2333	.init = clk_leaf_mux_set_rate_parent_test_init,
2334	.exit = clk_leaf_mux_set_rate_parent_test_exit,
2335	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
2336};
2337
2338struct clk_mux_notifier_rate_change {
2339	bool done;
2340	unsigned long old_rate;
2341	unsigned long new_rate;
2342	wait_queue_head_t wq;
2343};
2344
2345struct clk_mux_notifier_ctx {
2346	struct clk_multiple_parent_ctx mux_ctx;
2347	struct clk *clk;
2348	struct notifier_block clk_nb;
2349	struct clk_mux_notifier_rate_change pre_rate_change;
2350	struct clk_mux_notifier_rate_change post_rate_change;
2351};
2352
2353#define NOTIFIER_TIMEOUT_MS 100
2354
2355static int clk_mux_notifier_callback(struct notifier_block *nb,
2356				     unsigned long action, void *data)
2357{
2358	struct clk_notifier_data *clk_data = data;
2359	struct clk_mux_notifier_ctx *ctx = container_of(nb,
2360							struct clk_mux_notifier_ctx,
2361							clk_nb);
2362
2363	if (action & PRE_RATE_CHANGE) {
2364		ctx->pre_rate_change.old_rate = clk_data->old_rate;
2365		ctx->pre_rate_change.new_rate = clk_data->new_rate;
2366		ctx->pre_rate_change.done = true;
2367		wake_up_interruptible(&ctx->pre_rate_change.wq);
2368	}
2369
2370	if (action & POST_RATE_CHANGE) {
2371		ctx->post_rate_change.old_rate = clk_data->old_rate;
2372		ctx->post_rate_change.new_rate = clk_data->new_rate;
2373		ctx->post_rate_change.done = true;
2374		wake_up_interruptible(&ctx->post_rate_change.wq);
2375	}
2376
2377	return 0;
2378}
2379
2380static int clk_mux_notifier_test_init(struct kunit *test)
2381{
2382	struct clk_mux_notifier_ctx *ctx;
2383	const char *top_parents[2] = { "parent-0", "parent-1" };
2384	int ret;
2385
2386	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2387	if (!ctx)
2388		return -ENOMEM;
2389	test->priv = ctx;
2390	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2391	init_waitqueue_head(&ctx->pre_rate_change.wq);
2392	init_waitqueue_head(&ctx->post_rate_change.wq);
2393
2394	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2395								    &clk_dummy_rate_ops,
2396								    0);
2397	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2398	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2399	if (ret)
2400		return ret;
2401
2402	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2403								    &clk_dummy_rate_ops,
2404								    0);
2405	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2406	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2407	if (ret)
2408		return ret;
2409
2410	ctx->mux_ctx.current_parent = 0;
2411	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2412						   &clk_multiple_parents_mux_ops,
2413						   0);
2414	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2415	if (ret)
2416		return ret;
2417
2418	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2419	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2420	if (ret)
2421		return ret;
2422
2423	return 0;
2424}
2425
2426static void clk_mux_notifier_test_exit(struct kunit *test)
2427{
2428	struct clk_mux_notifier_ctx *ctx = test->priv;
2429	struct clk *clk = ctx->clk;
2430
2431	clk_notifier_unregister(clk, &ctx->clk_nb);
2432	clk_put(clk);
2433
2434	clk_hw_unregister(&ctx->mux_ctx.hw);
2435	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2436	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2437}
2438
2439/*
2440 * Test that if we have a notifier registered on a mux, the core
2441 * will notify us when we switch to another parent, and with the proper
2442 * old and new rates.
2443 */
2444static void clk_mux_notifier_set_parent_test(struct kunit *test)
2445{
2446	struct clk_mux_notifier_ctx *ctx = test->priv;
2447	struct clk_hw *hw = &ctx->mux_ctx.hw;
2448	struct clk *clk = clk_hw_get_clk(hw, NULL);
2449	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
2450	int ret;
2451
2452	ret = clk_set_parent(clk, new_parent);
2453	KUNIT_ASSERT_EQ(test, ret, 0);
2454
2455	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
2456					       ctx->pre_rate_change.done,
2457					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2458	KUNIT_ASSERT_GT(test, ret, 0);
2459
2460	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2461	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2462
2463	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
2464					       ctx->post_rate_change.done,
2465					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2466	KUNIT_ASSERT_GT(test, ret, 0);
2467
2468	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2469	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2470
2471	clk_put(new_parent);
2472	clk_put(clk);
2473}
2474
2475static struct kunit_case clk_mux_notifier_test_cases[] = {
2476	KUNIT_CASE(clk_mux_notifier_set_parent_test),
2477	{}
2478};
2479
2480/*
2481 * Test suite for a mux with multiple parents, and a notifier registered
2482 * on the mux.
2483 *
2484 * These tests exercise the behaviour of notifiers.
2485 */
2486static struct kunit_suite clk_mux_notifier_test_suite = {
2487	.name = "clk-mux-notifier",
2488	.init = clk_mux_notifier_test_init,
2489	.exit = clk_mux_notifier_test_exit,
2490	.test_cases = clk_mux_notifier_test_cases,
2491};
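
/*
 * Illustrative sketch, not registered with any clock: a real consumer
 * notifier usually distinguishes the three rate-change actions and
 * returns a notifier code, unlike clk_mux_notifier_callback() above,
 * which only records the rates for the test.
 */
static int __maybe_unused example_clk_rate_notifier_cb(struct notifier_block *nb,
						       unsigned long action, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (action) {
	case PRE_RATE_CHANGE:
		/* Returning NOTIFY_BAD here vetoes the rate change. */
		if (!ndata->new_rate)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case ABORT_RATE_CHANGE:
		/* Undo whatever was prepared at PRE_RATE_CHANGE time. */
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		/* The clock is now running at ndata->new_rate. */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}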
2492
2493static int
2494clk_mux_no_reparent_test_init(struct kunit *test)
2495{
2496	struct clk_multiple_parent_ctx *ctx;
2497	const char *parents[2] = { "parent-0", "parent-1"};
2498	int ret;
2499
2500	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2501	if (!ctx)
2502		return -ENOMEM;
2503	test->priv = ctx;
2504
2505	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2506							    &clk_dummy_rate_ops,
2507							    0);
2508	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2509	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
2510	if (ret)
2511		return ret;
2512
2513	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2514							    &clk_dummy_rate_ops,
2515							    0);
2516	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2517	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
2518	if (ret)
2519		return ret;
2520
2521	ctx->current_parent = 0;
2522	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
2523					   &clk_multiple_parents_no_reparent_mux_ops,
2524					   0);
2525	ret = clk_hw_register(NULL, &ctx->hw);
2526	if (ret)
2527		return ret;
2528
2529	return 0;
2530}
2531
2532static void
2533clk_mux_no_reparent_test_exit(struct kunit *test)
2534{
2535	struct clk_multiple_parent_ctx *ctx = test->priv;
2536
2537	clk_hw_unregister(&ctx->hw);
2538	clk_hw_unregister(&ctx->parents_ctx[0].hw);
2539	clk_hw_unregister(&ctx->parents_ctx[1].hw);
2540}
2541
2542/*
2543 * Test that if we have a mux that cannot change parent and we call
2544 * clk_round_rate() on it with a rate that should cause it to change
2545 * parent, it won't.
2546 */
2547static void clk_mux_no_reparent_round_rate(struct kunit *test)
2548{
2549	struct clk_multiple_parent_ctx *ctx = test->priv;
2550	struct clk_hw *hw = &ctx->hw;
2551	struct clk *clk = clk_hw_get_clk(hw, NULL);
2552	struct clk *other_parent, *parent;
2553	unsigned long other_parent_rate;
2554	unsigned long parent_rate;
2555	long rounded_rate;
2556
2557	parent = clk_get_parent(clk);
2558	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2559
2560	parent_rate = clk_get_rate(parent);
2561	KUNIT_ASSERT_GT(test, parent_rate, 0);
2562
2563	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2564	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2565	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2566
2567	other_parent_rate = clk_get_rate(other_parent);
2568	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2569	clk_put(other_parent);
2570
2571	rounded_rate = clk_round_rate(clk, other_parent_rate);
2572	KUNIT_ASSERT_GT(test, rounded_rate, 0);
2573	KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);
2574
2575	clk_put(clk);
2576}
2577
2578/*
2579 * Test that if we have a mux that cannot change parent and we call
2580 * clk_set_rate() on it with a rate that should cause it to change
2581 * parent, it won't.
2582 */
2583static void clk_mux_no_reparent_set_rate(struct kunit *test)
2584{
2585	struct clk_multiple_parent_ctx *ctx = test->priv;
2586	struct clk_hw *hw = &ctx->hw;
2587	struct clk *clk = clk_hw_get_clk(hw, NULL);
2588	struct clk *other_parent, *parent;
2589	unsigned long other_parent_rate;
2590	unsigned long parent_rate;
2591	unsigned long rate;
2592	int ret;
2593
2594	parent = clk_get_parent(clk);
2595	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2596
2597	parent_rate = clk_get_rate(parent);
2598	KUNIT_ASSERT_GT(test, parent_rate, 0);
2599
2600	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2601	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2602	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2603
2604	other_parent_rate = clk_get_rate(other_parent);
2605	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2606	clk_put(other_parent);
2607
2608	ret = clk_set_rate(clk, other_parent_rate);
2609	KUNIT_ASSERT_EQ(test, ret, 0);
2610
2611	rate = clk_get_rate(clk);
2612	KUNIT_ASSERT_GT(test, rate, 0);
2613	KUNIT_EXPECT_EQ(test, rate, parent_rate);
2614
2615	clk_put(clk);
2616}
2617
2618static struct kunit_case clk_mux_no_reparent_test_cases[] = {
2619	KUNIT_CASE(clk_mux_no_reparent_round_rate),
2620	KUNIT_CASE(clk_mux_no_reparent_set_rate),
2621	{}
2622};
2623
2624/*
2625 * Test suite for a clock mux that isn't allowed to change parent, using
2626 * the clk_hw_determine_rate_no_reparent() helper.
2627 *
2628 * These tests exercise that helper, and the proper selection of
2629 * rates and parents.
2630 */
2631static struct kunit_suite clk_mux_no_reparent_test_suite = {
2632	.name = "clk-mux-no-reparent",
2633	.init = clk_mux_no_reparent_test_init,
2634	.exit = clk_mux_no_reparent_test_exit,
2635	.test_cases = clk_mux_no_reparent_test_cases,
2636};
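
/*
 * Illustrative sketch, not registered anywhere: a mux provider that
 * must never switch parents on a rate request is assumed to simply
 * plug the helper into its determine_rate callback, roughly like the
 * dummy no-reparent ops used by the suite above.
 */
static const struct clk_ops example_no_reparent_mux_ops __maybe_unused = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_multiple_parents_mux_get_parent,
	.set_parent = clk_multiple_parents_mux_set_parent,
};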
2637
2638struct clk_register_clk_parent_data_test_case {
2639	const char *desc;
2640	struct clk_parent_data pdata;
2641};
2642
2643static void
2644clk_register_clk_parent_data_test_case_to_desc(
2645		const struct clk_register_clk_parent_data_test_case *t, char *desc)
2646{
2647	strcpy(desc, t->desc);
2648}
2649
2650static const struct clk_register_clk_parent_data_test_case
2651clk_register_clk_parent_data_of_cases[] = {
2652	{
2653		/*
2654		 * Test that a clk registered with a struct device_node can
2655		 * find a parent based on struct clk_parent_data::index.
2656		 */
2657		.desc = "clk_parent_data_of_index_test",
2658		.pdata.index = 0,
2659	},
2660	{
2661		/*
2662		 * Test that a clk registered with a struct device_node can
2663 * find a parent based on struct clk_parent_data::fw_name.
2664		 */
2665		.desc = "clk_parent_data_of_fwname_test",
2666		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2667	},
2668	{
2669		/*
2670		 * Test that a clk registered with a struct device_node can
2671		 * find a parent based on struct clk_parent_data::name.
2672		 */
2673		.desc = "clk_parent_data_of_name_test",
2674		/* The index must be negative to indicate firmware not used */
2675		.pdata.index = -1,
2676		.pdata.name = CLK_PARENT_DATA_1MHZ_NAME,
2677	},
2678	{
2679		/*
2680		 * Test that a clk registered with a struct device_node can
2681		 * find a parent based on struct
2682		 * clk_parent_data::{fw_name,name}.
2683		 */
2684		.desc = "clk_parent_data_of_fwname_name_test",
2685		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2686		.pdata.name = "not_matching",
2687	},
2688	{
2689		/*
2690		 * Test that a clk registered with a struct device_node can
2691		 * find a parent based on struct clk_parent_data::{index,name}.
2692		 * Index takes priority.
2693		 */
2694		.desc = "clk_parent_data_of_index_name_priority_test",
2695		.pdata.index = 0,
2696		.pdata.name = "not_matching",
2697	},
2698	{
2699		/*
2700		 * Test that a clk registered with a struct device_node can
2701		 * find a parent based on struct
2702		 * clk_parent_data::{index,fwname,name}. The fw_name takes
2703		 * priority over index and name.
2704		 */
2705		.desc = "clk_parent_data_of_index_fwname_name_priority_test",
2706		.pdata.index = 1,
2707		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2708		.pdata.name = "not_matching",
2709	},
2710};
2711
2712KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases,
2713		  clk_register_clk_parent_data_test_case_to_desc)
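
/*
 * Illustrative sketch, not used by the cases above: a provider would
 * typically list its parents once in an array like this and point
 * clk_init_data::parent_data at it; the "example-*" strings are
 * placeholders, not names used by the test overlay.
 */
static const struct clk_parent_data example_parent_data[] __maybe_unused = {
	{ .fw_name = "example-xo", .name = "example_xo_board", .index = 0 },
	{ .index = 1 },
};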
2714
2715/**
2716 * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests
2717 * @np: device node of clk under test
2718 * @hw: clk_hw for clk under test
2719 */
2720struct clk_register_clk_parent_data_of_ctx {
2721	struct device_node *np;
2722	struct clk_hw hw;
2723};
2724
2725static int clk_register_clk_parent_data_of_test_init(struct kunit *test)
2726{
2727	struct clk_register_clk_parent_data_of_ctx *ctx;
2728
2729	KUNIT_ASSERT_EQ(test, 0,
2730			of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
2731
2732	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2733	if (!ctx)
2734		return -ENOMEM;
2735	test->priv = ctx;
2736
2737	ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data");
2738	if (!ctx->np)
2739		return -ENODEV;
2740
2741	of_node_put_kunit(test, ctx->np);
2742
2743	return 0;
2744}
2745
2746/*
2747 * Test that a clk registered with a struct device_node can find a parent based on
2748 * struct clk_parent_data when the hw member isn't set.
2749 */
2750static void clk_register_clk_parent_data_of_test(struct kunit *test)
2751{
2752	struct clk_register_clk_parent_data_of_ctx *ctx = test->priv;
2753	struct clk_hw *parent_hw;
2754	const struct clk_register_clk_parent_data_test_case *test_param;
2755	struct clk_init_data init = { };
2756	struct clk *expected_parent, *actual_parent;
2757
2758	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np);
2759
2760	expected_parent = of_clk_get_kunit(test, ctx->np, 0);
2761	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2762
2763	test_param = test->param_value;
2764	init.parent_data = &test_param->pdata;
2765	init.num_parents = 1;
2766	init.name = "parent_data_of_test_clk";
2767	init.ops = &clk_dummy_single_parent_ops;
2768	ctx->hw.init = &init;
2769	KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw));
2770
2771	parent_hw = clk_hw_get_parent(&ctx->hw);
2772	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2773
2774	actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2775	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2776
2777	KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2778}
2779
2780static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = {
2781	KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test,
2782			 clk_register_clk_parent_data_of_test_gen_params),
2783	{}
2784};
2785
2786/*
2787 * Test suite for registering clks with struct clk_parent_data and a struct
2788 * device_node.
2789 */
2790static struct kunit_suite clk_register_clk_parent_data_of_suite = {
2791	.name = "clk_register_clk_parent_data_of",
2792	.init = clk_register_clk_parent_data_of_test_init,
2793	.test_cases = clk_register_clk_parent_data_of_test_cases,
2794};
2795
2796/**
2797 * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
2798 * @dev: device of clk under test
2799 * @hw: clk_hw for clk under test
2800 * @pdrv: driver to attach to find @dev
2801 */
2802struct clk_register_clk_parent_data_device_ctx {
2803	struct device *dev;
2804	struct clk_hw hw;
2805	struct platform_driver pdrv;
2806};
2807
2808static inline struct clk_register_clk_parent_data_device_ctx *
2809clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
2810{
2811	return container_of(to_platform_driver(pdev->dev.driver),
2812			    struct clk_register_clk_parent_data_device_ctx, pdrv);
2813}
2814
2815static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
2816{
2817	struct clk_register_clk_parent_data_device_ctx *ctx;
2818
2819	ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
2820	ctx->dev = &pdev->dev;
2821
2822	return 0;
2823}
2824
2825static void clk_register_clk_parent_data_device_driver(struct kunit *test)
2826{
2827	struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
2828	static const struct of_device_id match_table[] = {
2829		{ .compatible = "test,clk-parent-data" },
2830		{ }
2831	};
2832
2833	ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
2834	ctx->pdrv.driver.of_match_table = match_table;
2835	ctx->pdrv.driver.name = __func__;
2836	ctx->pdrv.driver.owner = THIS_MODULE;
2837
2838	KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
2839	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
2840}
2841
2842static const struct clk_register_clk_parent_data_test_case
2843clk_register_clk_parent_data_device_cases[] = {
2844	{
2845		/*
2846		 * Test that a clk registered with a struct device can find a
2847		 * parent based on struct clk_parent_data::index.
2848		 */
2849		.desc = "clk_parent_data_device_index_test",
2850		.pdata.index = 1,
2851	},
2852	{
2853		/*
2854		 * Test that a clk registered with a struct device can find a
2855 * parent based on struct clk_parent_data::fw_name.
2856		 */
2857		.desc = "clk_parent_data_device_fwname_test",
2858		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2859	},
2860	{
2861		/*
2862		 * Test that a clk registered with a struct device can find a
2863		 * parent based on struct clk_parent_data::name.
2864		 */
2865		.desc = "clk_parent_data_device_name_test",
2866		/* The index must be negative to indicate firmware not used */
2867		.pdata.index = -1,
2868		.pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
2869	},
2870	{
2871		/*
2872		 * Test that a clk registered with a struct device can find a
2873		 * parent based on struct clk_parent_data::{fw_name,name}.
2874		 */
2875		.desc = "clk_parent_data_device_fwname_name_test",
2876		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2877		.pdata.name = "not_matching",
2878	},
2879	{
2880		/*
2881		 * Test that a clk registered with a struct device can find a
2882		 * parent based on struct clk_parent_data::{index,name}. Index
2883		 * takes priority.
2884		 */
2885		.desc = "clk_parent_data_device_index_name_priority_test",
2886		.pdata.index = 1,
2887		.pdata.name = "not_matching",
2888	},
2889	{
2890		/*
2891		 * Test that a clk registered with a struct device can find a
2892		 * parent based on struct clk_parent_data::{index,fw_name,name}.
2893		 * The fw_name takes priority over index and name.
2894		 */
2895		.desc = "clk_parent_data_device_index_fwname_name_priority_test",
2896		.pdata.index = 0,
2897		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2898		.pdata.name = "not_matching",
2899	},
2900};
2901
2902KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
2903		  clk_register_clk_parent_data_device_cases,
2904		  clk_register_clk_parent_data_test_case_to_desc)
2905
2906/*
2907 * Test that a clk registered with a struct device can find a parent based on
2908 * struct clk_parent_data when the hw member isn't set.
2909 */
2910static void clk_register_clk_parent_data_device_test(struct kunit *test)
2911{
2912	struct clk_register_clk_parent_data_device_ctx *ctx;
2913	const struct clk_register_clk_parent_data_test_case *test_param;
2914	struct clk_hw *parent_hw;
2915	struct clk_init_data init = { };
2916	struct clk *expected_parent, *actual_parent;
2917
2918	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2919	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
2920	test->priv = ctx;
2921
2922	clk_register_clk_parent_data_device_driver(test);
2923
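    	/*
    	 * The "50" con_id is expected to resolve, via the overlay's clock-names,
    	 * to the 50 MHz parent that every parameterized case should select.
    	 */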
2924	expected_parent = clk_get_kunit(test, ctx->dev, "50");
2925	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2926
2927	test_param = test->param_value;
2928	init.parent_data = &test_param->pdata;
2929	init.num_parents = 1;
2930	init.name = "parent_data_device_test_clk";
2931	init.ops = &clk_dummy_single_parent_ops;
2932	ctx->hw.init = &init;
2933	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
2934
2935	parent_hw = clk_hw_get_parent(&ctx->hw);
2936	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2937
2938	actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2939	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2940
2941	KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2942}
2943
2944static const struct clk_register_clk_parent_data_test_case
2945clk_register_clk_parent_data_device_hw_cases[] = {
2946	{
2947		/*
2948		 * Test that a clk registered with a struct device can find a
2949		 * parent based on struct clk_parent_data::hw.
2950		 */
2951		.desc = "clk_parent_data_device_hw_index_test",
2952		/* The index must be negative to indicate that firmware isn't used */
2953		.pdata.index = -1,
2954	},
2955	{
2956		/*
2957		 * Test that a clk registered with a struct device can find a
2958		 * parent based on struct clk_parent_data::hw when
2959		 * struct clk_parent_data::fw_name is set.
2960		 */
2961		.desc = "clk_parent_data_device_hw_fwname_test",
2962		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2963	},
2964	{
2965		/*
2966		 * Test that a clk registered with a struct device can find a
2967		 * parent based on struct clk_parent_data::hw when struct
2968		 * clk_parent_data::name is set.
2969		 */
2970		.desc = "clk_parent_data_device_hw_name_test",
2971		/* The index must be negative to indicate that firmware isn't used */
2972		.pdata.index = -1,
2973		.pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
2974	},
2975	{
2976		/*
2977		 * Test that a clk registered with a struct device can find a
2978		 * parent based on struct clk_parent_data::hw when struct
2979		 * clk_parent_data::{fw_name,name} are set.
2980		 */
2981		.desc = "clk_parent_data_device_hw_fwname_name_test",
2982		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2983		.pdata.name = "not_matching",
2984	},
2985	{
2986		/*
2987		 * Test that a clk registered with a struct device can find a
2988		 * parent based on struct clk_parent_data::hw when struct
2989		 * clk_parent_data::index is set. The hw pointer takes
2990		 * priority.
2991		 */
2992		.desc = "clk_parent_data_device_hw_index_priority_test",
2993		.pdata.index = 0,
2994	},
2995	{
2996		/*
2997		 * Test that a clk registered with a struct device can find a
2998		 * parent based on struct clk_parent_data::hw when
2999		 * struct clk_parent_data::{index,fw_name,name} are set.
3000		 * The hw pointer takes priority over everything else.
3001		 */
3002		.desc = "clk_parent_data_device_hw_index_fwname_name_priority_test",
3003		.pdata.index = 0,
3004		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
3005		.pdata.name = "not_matching",
3006	},
3007};
3008
3009KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
3010		  clk_register_clk_parent_data_device_hw_cases,
3011		  clk_register_clk_parent_data_test_case_to_desc)
3012
3013/*
3014 * Test that a clk registered with a struct device can find a
3015 * parent based on struct clk_parent_data::hw.
3016 */
3017static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
3018{
3019	struct clk_register_clk_parent_data_device_ctx *ctx;
3020	const struct clk_register_clk_parent_data_test_case *test_param;
3021	struct clk_dummy_context *parent;
3022	struct clk_hw *parent_hw;
3023	struct clk_parent_data pdata = { };
3024	struct clk_init_data init = { };
3025
3026	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
3027	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
3028	test->priv = ctx;
3029
3030	clk_register_clk_parent_data_device_driver(test);
3031
3032	parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
3033	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
3034
3035	parent_hw = &parent->hw;
3036	parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
3037						&clk_dummy_rate_ops, 0);
3038
3039	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
3040
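    	/*
    	 * Copy the parameterized clk_parent_data and force the hw pointer so
    	 * that it must take priority over any index/fw_name/name also set.
    	 */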
3041	test_param = test->param_value;
3042	memcpy(&pdata, &test_param->pdata, sizeof(pdata));
3043	pdata.hw = parent_hw;
3044	init.parent_data = &pdata;
3045	init.num_parents = 1;
3046	init.ops = &clk_dummy_single_parent_ops;
3047	init.name = "parent_data_device_hw_test_clk";
3048	ctx->hw.init = &init;
3049	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
3050
3051	KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
3052}
3053
3054static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
3055	KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test,
3056			 clk_register_clk_parent_data_device_test_gen_params),
3057	KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test,
3058			 clk_register_clk_parent_data_device_hw_test_gen_params),
3059	{}
3060};
3061
3062static int clk_register_clk_parent_data_device_init(struct kunit *test)
3063{
3064	KUNIT_ASSERT_EQ(test, 0,
3065			of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
3066
3067	return 0;
3068}
3069
3070/*
3071 * Test suite for registering clks with struct clk_parent_data and a struct
3072 * device.
3073 */
3074static struct kunit_suite clk_register_clk_parent_data_device_suite = {
3075	.name = "clk_register_clk_parent_data_device",
3076	.init = clk_register_clk_parent_data_device_init,
3077	.test_cases = clk_register_clk_parent_data_device_test_cases,
3078};
3079
3080struct clk_assigned_rates_context {
3081	struct clk_dummy_context clk0;
3082	struct clk_dummy_context clk1;
3083};
3084
3085/**
3086 * struct clk_assigned_rates_test_param - Test parameters for clk_assigned_rates test
3087 * @desc: Test description
3088 * @overlay_begin: Pointer to start of DT overlay to apply for test
3089 * @overlay_end: Pointer to end of DT overlay to apply for test
3090 * @rate0: Initial rate of first clk
3091 * @rate1: Initial rate of second clk
3092 * @consumer_test: true if a consumer is being tested
3093 */
3094struct clk_assigned_rates_test_param {
3095	const char *desc;
3096	u8 *overlay_begin;
3097	u8 *overlay_end;
3098	unsigned long rate0;
3099	unsigned long rate1;
3100	bool consumer_test;
3101};
3102
3103#define TEST_PARAM_OVERLAY(overlay_name)				\
3104	.overlay_begin = of_overlay_begin(overlay_name),		\
3105	.overlay_end = of_overlay_end(overlay_name)
3106
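    /* Register a KUnit-managed dummy clk with an initial rate of @rate on @np */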
3107static void
3108clk_assigned_rates_register_clk(struct kunit *test,
3109				struct clk_dummy_context *ctx,
3110				struct device_node *np, const char *name,
3111				unsigned long rate)
3112{
3113	struct clk_init_data init = { };
3114
3115	init.name = name;
3116	init.ops = &clk_dummy_rate_ops;
3117	ctx->hw.init = &init;
3118	ctx->rate = rate;
3119
3120	KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, &ctx->hw));
3121	KUNIT_ASSERT_EQ(test, ctx->rate, rate);
3122}
3123
3124/*
3125 * Does most of the work of the test:
3126 *
3127 * 1. Apply the overlay to test
3128 * 2. Register the clk or clks to test
3129 * 3. Register the clk provider
3130 * 4. Apply clk defaults to the consumer device if this is a consumer test
3131 *
3132 * The tests will set different test_param values to test different scenarios
3133 * and validate that in their test functions.
3134 */
3135static int clk_assigned_rates_test_init(struct kunit *test)
3136{
3137	struct device_node *np, *consumer;
3138	struct clk_hw_onecell_data *data;
3139	struct clk_assigned_rates_context *ctx;
3140	u32 clk_cells;
3141	const struct clk_assigned_rates_test_param *test_param;
3142
3143	test_param = test->param_value;
3144
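    	/* 1. Apply the overlay under test */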
3145	KUNIT_ASSERT_EQ(test, 0, __of_overlay_apply_kunit(test,
3146							  test_param->overlay_begin,
3147							  test_param->overlay_end));
3148
3149	KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3150		ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL));
3151	test->priv = ctx;
3152
3153	KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3154		np = of_find_compatible_node(NULL, NULL, "test,clk-assigned-rates"));
3155	of_node_put_kunit(test, np);
3156
3157	KUNIT_ASSERT_EQ(test, 0, of_property_read_u32(np, "#clock-cells", &clk_cells));
3158	/* Only support #clock-cells = <0> or <1> */
3159	KUNIT_ASSERT_LT(test, clk_cells, 2);
3160
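    	/* 2 & 3. Register the clk(s) under test and add the clk provider */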
3161	clk_assigned_rates_register_clk(test, &ctx->clk0, np,
3162					"test_assigned_rate0", test_param->rate0);
3163	if (clk_cells == 0) {
3164		KUNIT_ASSERT_EQ(test, 0,
3165				of_clk_add_hw_provider_kunit(test, np, of_clk_hw_simple_get,
3166							     &ctx->clk0.hw));
3167	} else if (clk_cells == 1) {
3168		clk_assigned_rates_register_clk(test, &ctx->clk1, np,
3169						"test_assigned_rate1", test_param->rate1);
3170
3171		KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3172			data = kunit_kzalloc(test, struct_size(data, hws, 2), GFP_KERNEL));
3173		data->num = 2;
3174		data->hws[0] = &ctx->clk0.hw;
3175		data->hws[1] = &ctx->clk1.hw;
3176
3177		KUNIT_ASSERT_EQ(test, 0,
3178				of_clk_add_hw_provider_kunit(test, np, of_clk_hw_onecell_get, data));
3179	}
3180
3181	/* 4. Consumers are optional; only apply clk defaults when one is under test */
3182	if (test_param->consumer_test) {
3183		KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3184			consumer = of_find_compatible_node(NULL, NULL, "test,clk-consumer"));
3185		of_node_put_kunit(test, consumer);
3186
3187		KUNIT_ASSERT_EQ(test, 0, of_clk_set_defaults(consumer, false));
3188	}
3189
3190	return 0;
3191}
3192
3193static void clk_assigned_rates_assigns_one(struct kunit *test)
3194{
3195	struct clk_assigned_rates_context *ctx = test->priv;
3196
3197	KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
3198}
3199
3200static void clk_assigned_rates_assigns_multiple(struct kunit *test)
3201{
3202	struct clk_assigned_rates_context *ctx = test->priv;
3203
3204	KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
3205	KUNIT_EXPECT_EQ(test, ctx->clk1.rate, ASSIGNED_RATES_1_RATE);
3206}
3207
3208static void clk_assigned_rates_skips(struct kunit *test)
3209{
3210	struct clk_assigned_rates_context *ctx = test->priv;
3211	const struct clk_assigned_rates_test_param *test_param = test->param_value;
3212
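    	/* Malformed DT must leave the clk at its initial rate */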
3213	KUNIT_EXPECT_NE(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
3214	KUNIT_EXPECT_EQ(test, ctx->clk0.rate, test_param->rate0);
3215}
3216
3217OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one);
3218OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one_consumer);
3219OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one);
3220OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one_consumer);
3221
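    /*
     * Illustrative sketch of the provider node the single-rate overlays below
     * add; the exact node names and rates live in the kunit_clk_assigned_rates_*
     * overlay sources, so treat the values here as assumptions:
     *
     *	clk: kunit-clock {
     *		compatible = "test,clk-assigned-rates";
     *		#clock-cells = <0>;
     *		assigned-clocks = <&clk>;
     *		assigned-clock-rates = <1600000>;
     *	};
     */
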
3222/* Test cases that assign one rate */
3223static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_one_test_params[] = {
3224	{
3225		/*
3226		 * Test that a single cell assigned-clock-rates property
3227		 * assigns the rate when the property is in the provider.
3228		 */
3229		.desc = "provider assigns",
3230		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one),
3231	},
3232	{
3233		/*
3234		 * Test that a single cell assigned-clock-rates property
3235		 * assigns the rate when the property is in the consumer.
3236		 */
3237		.desc = "consumer assigns",
3238		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one_consumer),
3239		.consumer_test = true,
3240	},
3241	{
3242		/*
3243		 * Test that a single cell assigned-clock-rates-u64 property
3244		 * assigns the rate when the property is in the provider.
3245		 */
3246		.desc = "provider assigns u64",
3247		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one),
3248	},
3249	{
3250		/*
3251		 * Test that a single cell assigned-clock-rates-u64 property
3252		 * assigns the rate when the property is in the consumer.
3253		 */
3254		.desc = "consumer assigns u64",
3255		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one_consumer),
3256		.consumer_test = true,
3257	},
3258};
3259KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_one,
3260		       clk_assigned_rates_assigns_one_test_params, desc)
3261
3262OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple);
3263OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple_consumer);
3264OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple);
3265OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple_consumer);
3266
3267/* Test cases that assign multiple rates */
3268static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_multiple_test_params[] = {
3269	{
3270		/*
3271		 * Test that a multiple cell assigned-clock-rates property
3272		 * assigns the rates when the property is in the provider.
3273		 */
3274		.desc = "provider assigns",
3275		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple),
3276	},
3277	{
3278		/*
3279		 * Test that a multiple cell assigned-clock-rates property
3280		 * assigns the rates when the property is in the consumer.
3281		 */
3282		.desc = "consumer assigns",
3283		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple_consumer),
3284		.consumer_test = true,
3285	},
3286	{
3287		/*
3288		 * Test that a multiple cell assigned-clock-rates-u64 property
3289		 * assigns the rates when the property is in the provider.
3290		 */
3291		.desc = "provider assigns u64",
3292		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple),
3293	},
3294	{
3295		/*
3296		 * Test that a multiple cell assigned-clock-rates-u64 property
3297		 * assigns the rates when the property is in the consumer.
3298		 */
3299		.desc = "consumer assigns u64",
3300		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple_consumer),
3301		.consumer_test = true,
3302	},
3303};
3304KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_multiple,
3305		       clk_assigned_rates_assigns_multiple_test_params,
3306		       desc)
3307
3308OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without);
3309OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without_consumer);
3310OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero);
3311OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero_consumer);
3312OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null);
3313OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null_consumer);
3314
3315/* Test cases that skip changing the rate due to malformed DT */
3316static const struct clk_assigned_rates_test_param clk_assigned_rates_skips_test_params[] = {
3317	{
3318		/*
3319		 * Test that an assigned-clock-rates property without an assigned-clocks
3320		 * property fails when the property is in the provider.
3321		 */
3322		.desc = "provider missing assigned-clocks",
3323		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without),
3324		.rate0 = 3000,
3325	},
3326	{
3327		/*
3328		 * Test that an assigned-clock-rates property without an assigned-clocks
3329		 * property fails when the property is in the consumer.
3330		 */
3331		.desc = "consumer missing assigned-clocks",
3332		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without_consumer),
3333		.rate0 = 3000,
3334		.consumer_test = true,
3335	},
3336	{
3337		/*
3338		 * Test that an assigned-clock-rates property of zero doesn't
3339		 * set a rate when the property is in the provider.
3340		 */
3341		.desc = "provider assigned-clock-rates of zero",
3342		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero),
3343		.rate0 = 3000,
3344	},
3345	{
3346		/*
3347		 * Test that an assigned-clock-rates property of zero doesn't
3348		 * set a rate when the property is in the consumer.
3349		 */
3350		.desc = "consumer assigned-clock-rates of zero",
3351		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero_consumer),
3352		.rate0 = 3000,
3353		.consumer_test = true,
3354	},
3355	{
3356		/*
3357		 * Test that an assigned-clocks property with a null phandle
3358		 * doesn't set a rate when the property is in the provider.
3359		 */
3360		.desc = "provider assigned-clocks null phandle",
3361		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null),
3362		.rate0 = 3000,
3363	},
3364	{
3365		/*
3366		 * Test that an assigned-clocks property with a null phandle
3367		 * doesn't set a rate when the property is in the consumer.
3368		 */
3369		.desc = "consumer assigned-clocks null phandle",
3370		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null_consumer),
3371		.rate0 = 3000,
3372		.consumer_test = true,
3373	},
3374};
3375KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_skips,
3376		       clk_assigned_rates_skips_test_params,
3377		       desc)
3378
3379static struct kunit_case clk_assigned_rates_test_cases[] = {
3380	KUNIT_CASE_PARAM(clk_assigned_rates_assigns_one,
3381			 clk_assigned_rates_assigns_one_gen_params),
3382	KUNIT_CASE_PARAM(clk_assigned_rates_assigns_multiple,
3383			 clk_assigned_rates_assigns_multiple_gen_params),
3384	KUNIT_CASE_PARAM(clk_assigned_rates_skips,
3385			 clk_assigned_rates_skips_gen_params),
3386	{}
3387};
3388
3389/*
3390 * Test suite for assigned-clock-rates{-u64} DT property.
3391 */
3392static struct kunit_suite clk_assigned_rates_suite = {
3393	.name = "clk_assigned_rates",
3394	.test_cases = clk_assigned_rates_test_cases,
3395	.init = clk_assigned_rates_test_init,
3396};
3397
3398kunit_test_suites(
3399	&clk_assigned_rates_suite,
3400	&clk_leaf_mux_set_rate_parent_test_suite,
3401	&clk_test_suite,
3402	&clk_multiple_parents_mux_test_suite,
3403	&clk_mux_no_reparent_test_suite,
3404	&clk_mux_notifier_test_suite,
3405	&clk_orphan_transparent_multiple_parent_mux_test_suite,
3406	&clk_orphan_transparent_single_parent_test_suite,
3407	&clk_orphan_two_level_root_last_test_suite,
3408	&clk_range_test_suite,
3409	&clk_range_maximize_test_suite,
3410	&clk_range_minimize_test_suite,
3411	&clk_register_clk_parent_data_of_suite,
3412	&clk_register_clk_parent_data_device_suite,
3413	&clk_single_parent_mux_test_suite,
3414	&clk_uncached_test_suite,
3415);
3416MODULE_DESCRIPTION("Kunit tests for clk framework");
3417MODULE_LICENSE("GPL v2");