/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/*** debugfs support ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove(clk->dentry);
out:
	return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	struct hlist_node *tmp;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);

	if (ret)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so that requirement is already satisfied.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time; all other clks added dynamically will be
 * done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);

	if (!orphandir)
		return -ENOMEM;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	struct hlist_node *tmp;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	spin_lock_irqsave(&enable_lock, flags);

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	if (__clk_is_enabled(clk) && clk->ops->disable)
		clk->ops->disable(clk->hw);

unlock_out:
	spin_unlock_irqrestore(&enable_lock, flags);

out:
	return;
}

static int clk_disable_unused(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_disable_unused);
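
/*
 * Example (illustrative, not part of this file): a clock that must keep
 * running even when no driver has claimed it -- say, one feeding a debug
 * UART -- can opt out of the late_initcall gating above by setting
 * CLK_IGNORE_UNUSED.  A minimal sketch, assuming a hypothetical "uart_dbg"
 * gate using the generic clk_gate_ops:
 *
 *	static const char *uart_dbg_parents[] = { "periph" };
 *
 *	static struct clk_init_data uart_dbg_init = {
 *		.name		= "uart_dbg",
 *		.ops		= &clk_gate_ops,
 *		.parent_names	= uart_dbg_parents,
 *		.num_parents	= 1,
 *		.flags		= CLK_IGNORE_UNUSED,
 *	};
 */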

/*** helper functions ***/

inline const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? -EINVAL : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

inline int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->enable_count;
}

inline int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? -EINVAL : clk->flags;
}

int __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return -EINVAL;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/*** clk api ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	mutex_lock(&prepare_lock);
	__clk_unprepare(clk);
	mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a
 * simple case, clk_prepare can be used instead of clk_enable to ungate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk ungate operation may require a fast and a
 * slow part.  It is for this reason that clk_prepare and clk_enable are not
 * mutually exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	mutex_lock(&prepare_lock);
	ret = __clk_prepare(clk);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&enable_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&enable_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
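
/*
 * Example (illustrative, not part of this file): the required call ordering
 * from a consumer driver's point of view.  A minimal sketch, assuming a
 * hypothetical driver that already holds a struct clk *my_clk:
 *
 *	ret = clk_prepare(my_clk);	// may sleep; process context only
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(my_clk);	// atomic-safe; never sleeps
 *	if (ret) {
 *		clk_unprepare(my_clk);
 *		return ret;
 *	}
 *
 *	// ... use the hardware ...
 *
 *	clk_disable(my_clk);		// reverse order on teardown
 *	clk_unprepare(my_clk);
 */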

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk.  Does not query the hardware.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	mutex_lock(&prepare_lock);
	rate = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;

	if (!clk)
		return -EINVAL;

	if (!clk->ops->round_rate) {
		if (clk->flags & CLK_SET_RATE_PARENT)
			return __clk_round_rate(clk->parent, rate);
		else
			return clk->rate;
	}

	if (clk->parent)
		parent_rate = clk->parent->rate;

	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	mutex_lock(&prepare_lock);
	ret = __clk_round_rate(clk, rate);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
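
/*
 * Example (illustrative, not part of this file): a consumer can probe what
 * rate the hardware would actually grant before committing to it.  A minimal
 * sketch, assuming a hypothetical 48 MHz target and tolerance:
 *
 *	long rounded = clk_round_rate(my_clk, 48000000);
 *
 *	if (rounded >= 47000000)
 *		ret = clk_set_rate(my_clk, rounded);
 *	else
 *		dev_warn(dev, "48 MHz not achievable, best is %ld\n", rounded);
 */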

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct hlist_node *tmp;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct hlist_node *tmp;
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort the rate change if a driver returns NOTIFY_BAD */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret == NOTIFY_BAD)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret == NOTIFY_BAD)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
	struct clk *child;
	struct hlist_node *tmp;

	clk->new_rate = new_rate;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw,
					new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	/* never propagate up to the parent */
	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
		if (!clk->ops->round_rate) {
			clk->new_rate = clk->rate;
			return NULL;
		}
		new_rate = clk->ops->round_rate(clk->hw, rate,
				&best_parent_rate);
		goto out;
	}

	/* need clk->parent from here on out */
	if (!clk->parent) {
		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
		return NULL;
	}

	if (!clk->ops->round_rate) {
		top = clk_calc_new_rates(clk->parent, rate);
		new_rate = clk->parent->new_rate;

		goto out;
	}

	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

	if (best_parent_rate != clk->parent->rate) {
		top = clk_calc_new_rates(clk->parent, best_parent_rate);

		goto out;
	}

out:
	clk_calc_subtree(clk, new_rate);

	return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk,
		unsigned long event)
{
	struct hlist_node *tmp;
	struct clk *child, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret == NOTIFY_BAD)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		clk = clk_propagate_rate_change(child, event);
		if (clk)
			fail_clk = clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	struct hlist_node *tmp;

	old_rate = clk->rate;

	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	/* bail early if nothing to do */
	if (rate == clk->rate)
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

	mutex_unlock(&prepare_lock);

	return 0;
out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
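
/*
 * Example (illustrative, not part of this file): letting a rate request
 * propagate upstream.  A minimal sketch, assuming a hypothetical divider
 * registered with CLK_SET_RATE_PARENT whose .round_rate asks its parent
 * PLL for a better input rate:
 *
 *	// consumer side: one call may retune both the divider and its PLL
 *	ret = clk_set_rate(div_clk, 147456000);
 *
 * Without CLK_SET_RATE_PARENT the same call would only pick the closest
 * rate the divider can derive from the PLL's current output.
 */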

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	mutex_lock(&prepare_lock);
	parent = __clk_get_parent(clk);
	mutex_unlock(&prepare_lock);

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kzalloc((sizeof(struct clk *) * clk->num_parents),
					GFP_KERNEL);

	if (!clk->parents)
		ret = __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		ret = clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		ret = clk->parents[index];

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
	struct dentry *d;
	struct dentry *new_parent_d;
#endif

	if (!clk || !new_parent)
		return;

	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
	if (!inited)
		goto out;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
out:
#endif

	clk->parent = new_parent;

	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent;
	unsigned long flags;
	int ret = -EINVAL;
	u8 i;

	old_parent = clk->parent;

	if (!clk->parents)
		clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
				GFP_KERNEL);

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents)
				clk->parents[i] = __clk_lookup(parent->name);
			break;
		}
	}

	if (i == clk->num_parents) {
		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
				__func__, parent->name, clk->name);
		goto out;
	}

	/* migrate prepare and enable */
	if (clk->prepare_count)
		__clk_prepare(parent);

	/* FIXME replace with clk_is_enabled(clk) someday */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_enable(parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);

	/* clean up old prepare and enable */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_disable(old_parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	if (clk->prepare_count)
		__clk_unprepare(old_parent);

out:
	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	if (!clk || !clk->ops)
		return -EINVAL;

	if (!clk->ops->set_parent)
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	if (clk->parent == parent)
		goto out;

	/* propagate PRE_RATE_CHANGE notifications */
	if (clk->notifier_count)
		ret = __clk_speculate_rates(clk, parent->rate);

	/* abort if a driver objects (covers both NOTIFY_STOP and NOTIFY_BAD) */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* only re-parent if the clock is not in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
		ret = -EBUSY;
	else
		ret = __clk_set_parent(clk, parent);

	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
		goto out;
	}

	/* propagate rate recalculation downstream */
	__clk_reparent(clk, parent);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
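
/*
 * Example (illustrative, not part of this file): switching a mux clock
 * between two of its possible inputs.  A minimal sketch, assuming
 * hypothetical clocks "mux", "pll_a" and "osc" already obtained via
 * clk_get():
 *
 *	// run from the stable crystal while pll_a is reprogrammed
 *	ret = clk_set_parent(mux, osc);
 *	if (ret)
 *		return ret;
 *
 *	// ... reconfigure pll_a ...
 *
 *	ret = clk_set_parent(mux, pll_a);
 *
 * If the mux has CLK_SET_PARENT_GATE set, both calls fail with -EBUSY
 * unless the mux is unprepared at the time of the call.
 */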

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp, *tmp2;

	if (!clk)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents && !clk->parents) {
		clk->parents = kmalloc((sizeof(struct clk *) * clk->num_parents),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	clk_debug_register(clk);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use
 * with very large numbers of clocks that need to be statically initialized.
 * It is a layering violation to include clk-private.h from any code which
 * implements a clock's .ops; as such any statically initialized clock data
 * MUST be in a separate C file from the logic that implements its operations.
 * Returns the struct clk on success, or an ERR_PTR-encoded error code
 * otherwise.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with
 * new clock nodes.  It returns a pointer to the newly allocated struct clk
 * which cannot be dereferenced by driver code but may be used in conjunction
 * with the rest of the clock API.  In the event of an error clk_register
 * will return an error code; drivers must test for an error code after
 * calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kzalloc((sizeof(char *) * clk->num_parents),
			GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
				GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return clk;

fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
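
/*
 * Example (illustrative, not part of this file): registering a clock from
 * provider code.  A minimal sketch, assuming a hypothetical driver that
 * embeds its own struct clk_hw and supplies clk_init_data:
 *
 *	static const char *foo_parents[] = { "osc24m" };
 *
 *	static struct clk_init_data foo_init = {
 *		.name		= "foo_gate",
 *		.ops		= &foo_gate_ops,	// driver-defined clk_ops
 *		.parent_names	= foo_parents,
 *		.num_parents	= ARRAY_SIZE(foo_parents),
 *		.flags		= 0,
 *	};
 *
 *	foo->hw.init = &foo_init;
 *	clk = clk_register(dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */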

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would recursively take the prepare_lock mutex and deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
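
/*
 * Example (illustrative, not part of this file): a consumer reacting to
 * rate changes on its functional clock.  A minimal sketch, assuming a
 * hypothetical driver with its own FOO_MAX_RATE limit and
 * foo_update_divider() helper:
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			if (cnd->new_rate > FOO_MAX_RATE)
 *				return NOTIFY_BAD;	// veto the change
 *			return NOTIFY_OK;
 *		case POST_RATE_CHANGE:
 *			foo_update_divider(cnd->new_rate);
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	clk_notifier_register(foo->clk, &foo_nb);
 */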

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
#include <linux/stringify.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/*** private data structures ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret = 0;

	if (!core->dev)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->dev)
		return;

	pm_runtime_put_sync(core->dev);
}

/*** locking ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock.  So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
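
/*
 * Example (illustrative, not part of this file): why the owner/refcount
 * dance above is needed.  A clock op may legitimately re-enter the
 * framework from within a locked region -- e.g. a hypothetical PLL whose
 * .set_rate must ungate a sibling reference clock:
 *
 *	static int foo_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 *				    unsigned long parent_rate)
 *	{
 *		// called with prepare_lock already held by clk_set_rate();
 *		// clk_prepare_enable() below re-takes it on the same task,
 *		// which the refcount in clk_prepare_lock() permits
 *		clk_prepare_enable(foo->refclk);
 *		// ... program the PLL ...
 *		return 0;
 *	}
 */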

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback.  If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->dev) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->dev)
		pm_runtime_put(core->dev);

	return ret;
}

/*** helper functions ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;

	if (!core->parents[index])
		core->parents[index] =
				clk_core_lookup(core->parent_names[index]);

	return core->parents[index];
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	unsigned long ret;

	if (!core) {
		ret = 0;
		goto out;
	}

	ret = core->rate;

	if (!core->num_parents)
		goto out;

	if (!core->parent)
		ret = 0;

out:
	return ret;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * Helper for finding best parent to provide a given frequency.  This can be
 * used directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
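
/*
 * Example (illustrative, not part of this file): a provider reusing the
 * helper above as its .determine_rate callback.  A minimal sketch for a
 * hypothetical mux-only clock with driver-defined parent callbacks:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 *
 * Use __clk_mux_determine_rate_closest instead when the nearest rate,
 * above or below the request, is preferred over "fastest not above".
 */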
542
543/*** clk api ***/
544
545static void clk_core_rate_unprotect(struct clk_core *core)
546{
547 lockdep_assert_held(&prepare_lock);
548
549 if (!core)
550 return;
551
552 if (WARN_ON(core->protect_count == 0))
553 return;
554
555 if (--core->protect_count > 0)
556 return;
557
558 clk_core_rate_unprotect(core->parent);
559}
560
561static int clk_core_rate_nuke_protect(struct clk_core *core)
562{
563 int ret;
564
565 lockdep_assert_held(&prepare_lock);
566
567 if (!core)
568 return -EINVAL;
569
570 if (core->protect_count == 0)
571 return 0;
572
573 ret = core->protect_count;
574 core->protect_count = 1;
575 clk_core_rate_unprotect(core);
576
577 return ret;
578}
579
580/**
581 * clk_rate_exclusive_put - release exclusivity over clock rate control
582 * @clk: the clk over which the exclusivity is released
583 *
584 * clk_rate_exclusive_put() completes a critical section during which a clock
585 * consumer cannot tolerate any other consumer performing any operation on the
586 * clock which could result in a rate change or rate glitch. Exclusive clocks
587 * cannot have their rate changed, either directly or indirectly due to changes
588 * further up the parent chain of clocks. As a result, clocks up the parent
589 * chain are also placed under exclusive control of the calling consumer.
590 *
591 * If exclusivity is claimed more than once on a clock, even by the same
592 * consumer, the rate effectively gets locked as exclusivity can't be preempted.
593 *
594 * Calls to clk_rate_exclusive_put() must be balanced with calls to
595 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not
596 * return an error status.
597 */
598void clk_rate_exclusive_put(struct clk *clk)
599{
600 if (!clk)
601 return;
602
603 clk_prepare_lock();
604
605 /*
606 * if there is something wrong with this consumer protect count, stop
607 * here before messing with the provider
608 */
609 if (WARN_ON(clk->exclusive_count <= 0))
610 goto out;
611
612 clk_core_rate_unprotect(clk->core);
613 clk->exclusive_count--;
614out:
615 clk_prepare_unlock();
616}
617EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
618
619static void clk_core_rate_protect(struct clk_core *core)
620{
621 lockdep_assert_held(&prepare_lock);
622
623 if (!core)
624 return;
625
626 if (core->protect_count == 0)
627 clk_core_rate_protect(core->parent);
628
629 core->protect_count++;
630}
631
632static void clk_core_rate_restore_protect(struct clk_core *core, int count)
633{
634 lockdep_assert_held(&prepare_lock);
635
636 if (!core)
637 return;
638
639 if (count == 0)
640 return;
641
642 clk_core_rate_protect(core);
643 core->protect_count = count;
644}
645
646/**
647 * clk_rate_exclusive_get - get exclusivity over the clk rate control
648 * @clk: the clk over which exclusivity of rate control is requested
649 *
650 * clk_rate_exclusive_get() begins a critical section during which a clock
651 * consumer cannot tolerate any other consumer performing any operation on the
652 * clock which could result in a rate change or rate glitch. Exclusive clocks
653 * cannot have their rate changed, either directly or indirectly due to changes
654 * further up the parent chain of clocks. As a result, clocks up the parent
655 * chain are also placed under exclusive control of the calling consumer.
656 *
657 * If exclusivity is claimed more than once on a clock, even by the same
658 * consumer, the rate effectively gets locked as exclusivity can't be preempted.
659 *
660 * Calls to clk_rate_exclusive_get() should be balanced with calls to
661 * clk_rate_exclusive_put(). Calls to this function may sleep.
662 * Returns 0 on success, a negative errno otherwise.
663 */
664int clk_rate_exclusive_get(struct clk *clk)
665{
666 if (!clk)
667 return 0;
668
669 clk_prepare_lock();
670 clk_core_rate_protect(clk->core);
671 clk->exclusive_count++;
672 clk_prepare_unlock();
673
674 return 0;
675}
676EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
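
/*
 * A minimal consumer sketch of an exclusive rate section; the 19.2 MHz rate
 * is hypothetical and error handling is trimmed:
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	clk_set_rate(clk, 19200000);
 *	...
 *	clk_rate_exclusive_put(clk);
 *
 * Between the two calls other consumers cannot re-rate or glitch the clock;
 * the claiming consumer itself still may, since clk_set_rate() temporarily
 * drops its own protection.
 */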
677
678static void clk_core_unprepare(struct clk_core *core)
679{
680 lockdep_assert_held(&prepare_lock);
681
682 if (!core)
683 return;
684
685 if (WARN_ON(core->prepare_count == 0))
686 return;
687
688 if (WARN_ON(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL))
689 return;
690
691 if (--core->prepare_count > 0)
692 return;
693
694 WARN_ON(core->enable_count > 0);
695
696 trace_clk_unprepare(core);
697
698 if (core->ops->unprepare)
699 core->ops->unprepare(core->hw);
700
701 clk_pm_runtime_put(core);
702
703 trace_clk_unprepare_complete(core);
704 clk_core_unprepare(core->parent);
705}
706
707static void clk_core_unprepare_lock(struct clk_core *core)
708{
709 clk_prepare_lock();
710 clk_core_unprepare(core);
711 clk_prepare_unlock();
712}
713
714/**
715 * clk_unprepare - undo preparation of a clock source
716 * @clk: the clk being unprepared
717 *
718 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
719 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
720 * if the operation may sleep. One example is a clk which is accessed over
721 * I2C. In the complex case a clk gate operation may require a fast and a slow
722 * part. It is for this reason that clk_unprepare and clk_disable are not
723 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
724 */
725void clk_unprepare(struct clk *clk)
726{
727 if (IS_ERR_OR_NULL(clk))
728 return;
729
730 clk_core_unprepare_lock(clk->core);
731}
732EXPORT_SYMBOL_GPL(clk_unprepare);
733
734static int clk_core_prepare(struct clk_core *core)
735{
736 int ret = 0;
737
738 lockdep_assert_held(&prepare_lock);
739
740 if (!core)
741 return 0;
742
743 if (core->prepare_count == 0) {
744 ret = clk_pm_runtime_get(core);
745 if (ret)
746 return ret;
747
748 ret = clk_core_prepare(core->parent);
749 if (ret)
750 goto runtime_put;
751
752 trace_clk_prepare(core);
753
754 if (core->ops->prepare)
755 ret = core->ops->prepare(core->hw);
756
757 trace_clk_prepare_complete(core);
758
759 if (ret)
760 goto unprepare;
761 }
762
763 core->prepare_count++;
764
765 return 0;
766unprepare:
767 clk_core_unprepare(core->parent);
768runtime_put:
769 clk_pm_runtime_put(core);
770 return ret;
771}
772
773static int clk_core_prepare_lock(struct clk_core *core)
774{
775 int ret;
776
777 clk_prepare_lock();
778 ret = clk_core_prepare(core);
779 clk_prepare_unlock();
780
781 return ret;
782}
783
784/**
785 * clk_prepare - prepare a clock source
786 * @clk: the clk being prepared
787 *
788 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
789 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
790 * operation may sleep. One example is a clk which is accessed over I2C. In
791 * the complex case a clk ungate operation may require a fast and a slow part.
792 * It is for this reason that clk_prepare and clk_enable are not mutually
793 * exclusive. In fact clk_prepare must be called before clk_enable.
794 * Returns 0 on success, a negative errno otherwise.
795 */
796int clk_prepare(struct clk *clk)
797{
798 if (!clk)
799 return 0;
800
801 return clk_core_prepare_lock(clk->core);
802}
803EXPORT_SYMBOL_GPL(clk_prepare);
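
/*
 * A hedged consumer sketch: preparing a possibly slow (e.g. I2C-backed)
 * clock from process context at probe time; the names are hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk = devm_clk_get(&pdev->dev, "ref");
 *
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		return clk_prepare(clk);
 *	}
 *
 * The matching clk_unprepare() then belongs in the remove path, after any
 * clk_disable().
 */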
804
805static void clk_core_disable(struct clk_core *core)
806{
807 lockdep_assert_held(&enable_lock);
808
809 if (!core)
810 return;
811
812 if (WARN_ON(core->enable_count == 0))
813 return;
814
815 if (WARN_ON(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL))
816 return;
817
818 if (--core->enable_count > 0)
819 return;
820
821 trace_clk_disable_rcuidle(core);
822
823 if (core->ops->disable)
824 core->ops->disable(core->hw);
825
826 trace_clk_disable_complete_rcuidle(core);
827
828 clk_core_disable(core->parent);
829}
830
831static void clk_core_disable_lock(struct clk_core *core)
832{
833 unsigned long flags;
834
835 flags = clk_enable_lock();
836 clk_core_disable(core);
837 clk_enable_unlock(flags);
838}
839
840/**
841 * clk_disable - gate a clock
842 * @clk: the clk being gated
843 *
844 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
845 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
846 * clk if the operation is fast and will never sleep. One example is a
847 * SoC-internal clk which is controlled via simple register writes. In the
848 * complex case a clk gate operation may require a fast and a slow part. It is
849 * for this reason that clk_unprepare and clk_disable are not mutually
850 * exclusive. In fact clk_disable must be called before clk_unprepare.
851 */
852void clk_disable(struct clk *clk)
853{
854 if (IS_ERR_OR_NULL(clk))
855 return;
856
857 clk_core_disable_lock(clk->core);
858}
859EXPORT_SYMBOL_GPL(clk_disable);
860
861static int clk_core_enable(struct clk_core *core)
862{
863 int ret = 0;
864
865 lockdep_assert_held(&enable_lock);
866
867 if (!core)
868 return 0;
869
870 if (WARN_ON(core->prepare_count == 0))
871 return -ESHUTDOWN;
872
873 if (core->enable_count == 0) {
874 ret = clk_core_enable(core->parent);
875
876 if (ret)
877 return ret;
878
879 trace_clk_enable_rcuidle(core);
880
881 if (core->ops->enable)
882 ret = core->ops->enable(core->hw);
883
884 trace_clk_enable_complete_rcuidle(core);
885
886 if (ret) {
887 clk_core_disable(core->parent);
888 return ret;
889 }
890 }
891
892 core->enable_count++;
893 return 0;
894}
895
896static int clk_core_enable_lock(struct clk_core *core)
897{
898 unsigned long flags;
899 int ret;
900
901 flags = clk_enable_lock();
902 ret = clk_core_enable(core);
903 clk_enable_unlock(flags);
904
905 return ret;
906}
907
908/**
909 * clk_enable - ungate a clock
910 * @clk: the clk being ungated
911 *
912 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
913 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
914 * if the operation will never sleep. One example is a SoC-internal clk which
915 * is controlled via simple register writes. In the complex case a clk ungate
916 * operation may require a fast and a slow part. It is for this reason that
917 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
918 * must be called before clk_enable. Returns 0 on success, a negative errno
919 * otherwise.
920 */
921int clk_enable(struct clk *clk)
922{
923 if (!clk)
924 return 0;
925
926 return clk_core_enable_lock(clk->core);
927}
928EXPORT_SYMBOL_GPL(clk_enable);
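
/*
 * A hedged sketch of the resulting calling convention; the lock and names
 * are hypothetical. clk_prepare() may sleep, so it happens up front, while
 * clk_enable()/clk_disable() may run under a spinlock:
 *
 *	clk_prepare(clk);
 *
 *	spin_lock_irqsave(&foo->lock, flags);
 *	clk_enable(clk);
 *	...
 *	clk_disable(clk);
 *	spin_unlock_irqrestore(&foo->lock, flags);
 *
 *	clk_unprepare(clk);
 */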
929
930static int clk_core_prepare_enable(struct clk_core *core)
931{
932 int ret;
933
934 ret = clk_core_prepare_lock(core);
935 if (ret)
936 return ret;
937
938 ret = clk_core_enable_lock(core);
939 if (ret)
940 clk_core_unprepare_lock(core);
941
942 return ret;
943}
944
945static void clk_core_disable_unprepare(struct clk_core *core)
946{
947 clk_core_disable_lock(core);
948 clk_core_unprepare_lock(core);
949}
950
951static void clk_unprepare_unused_subtree(struct clk_core *core)
952{
953 struct clk_core *child;
954
955 lockdep_assert_held(&prepare_lock);
956
957 hlist_for_each_entry(child, &core->children, child_node)
958 clk_unprepare_unused_subtree(child);
959
960 if (core->prepare_count)
961 return;
962
963 if (core->flags & CLK_IGNORE_UNUSED)
964 return;
965
966 if (clk_pm_runtime_get(core))
967 return;
968
969 if (clk_core_is_prepared(core)) {
970 trace_clk_unprepare(core);
971 if (core->ops->unprepare_unused)
972 core->ops->unprepare_unused(core->hw);
973 else if (core->ops->unprepare)
974 core->ops->unprepare(core->hw);
975 trace_clk_unprepare_complete(core);
976 }
977
978 clk_pm_runtime_put(core);
979}
980
981static void clk_disable_unused_subtree(struct clk_core *core)
982{
983 struct clk_core *child;
984 unsigned long flags;
985
986 lockdep_assert_held(&prepare_lock);
987
988 hlist_for_each_entry(child, &core->children, child_node)
989 clk_disable_unused_subtree(child);
990
991 if (core->flags & CLK_OPS_PARENT_ENABLE)
992 clk_core_prepare_enable(core->parent);
993
994 if (clk_pm_runtime_get(core))
995 goto unprepare_out;
996
997 flags = clk_enable_lock();
998
999 if (core->enable_count)
1000 goto unlock_out;
1001
1002 if (core->flags & CLK_IGNORE_UNUSED)
1003 goto unlock_out;
1004
1005 /*
1006 * some gate clocks have special needs during the disable-unused
1007 * sequence. call .disable_unused if available, otherwise fall
1008 * back to .disable
1009 */
1010 if (clk_core_is_enabled(core)) {
1011 trace_clk_disable(core);
1012 if (core->ops->disable_unused)
1013 core->ops->disable_unused(core->hw);
1014 else if (core->ops->disable)
1015 core->ops->disable(core->hw);
1016 trace_clk_disable_complete(core);
1017 }
1018
1019unlock_out:
1020 clk_enable_unlock(flags);
1021 clk_pm_runtime_put(core);
1022unprepare_out:
1023 if (core->flags & CLK_OPS_PARENT_ENABLE)
1024 clk_core_disable_unprepare(core->parent);
1025}
1026
1027static bool clk_ignore_unused;
1028static int __init clk_ignore_unused_setup(char *__unused)
1029{
1030 clk_ignore_unused = true;
1031 return 1;
1032}
1033__setup("clk_ignore_unused", clk_ignore_unused_setup);
1034
1035static int clk_disable_unused(void)
1036{
1037 struct clk_core *core;
1038
1039 if (clk_ignore_unused) {
1040 pr_warn("clk: Not disabling unused clocks\n");
1041 return 0;
1042 }
1043
1044 clk_prepare_lock();
1045
1046 hlist_for_each_entry(core, &clk_root_list, child_node)
1047 clk_disable_unused_subtree(core);
1048
1049 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1050 clk_disable_unused_subtree(core);
1051
1052 hlist_for_each_entry(core, &clk_root_list, child_node)
1053 clk_unprepare_unused_subtree(core);
1054
1055 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1056 clk_unprepare_unused_subtree(core);
1057
1058 clk_prepare_unlock();
1059
1060 return 0;
1061}
1062late_initcall_sync(clk_disable_unused);
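
/*
 * Two escape hatches from this late-boot pass: booting with
 * "clk_ignore_unused" on the kernel command line skips it globally, and a
 * provider can exempt a single clock via its init data. A hedged sketch
 * with hypothetical names:
 *
 *	static const struct clk_init_data foo_dbg_init = {
 *		.name = "foo_dbg",
 *		.ops = &foo_gate_ops,
 *		.flags = CLK_IGNORE_UNUSED,
 *	};
 */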
1063
1064static int clk_core_determine_round_nolock(struct clk_core *core,
1065 struct clk_rate_request *req)
1066{
1067 long rate;
1068
1069 lockdep_assert_held(&prepare_lock);
1070
1071 if (!core)
1072 return 0;
1073
1074	/*
1075	 * At this point, core protection will be disabled if:
1076	 * - the provider is not protected at all
1077	 * - the calling consumer is the only one which has exclusivity
1078	 *   over the provider
1079	 */
1080 if (clk_core_rate_is_protected(core)) {
1081 req->rate = core->rate;
1082 } else if (core->ops->determine_rate) {
1083 return core->ops->determine_rate(core->hw, req);
1084 } else if (core->ops->round_rate) {
1085 rate = core->ops->round_rate(core->hw, req->rate,
1086 &req->best_parent_rate);
1087 if (rate < 0)
1088 return rate;
1089
1090 req->rate = rate;
1091 } else {
1092 return -EINVAL;
1093 }
1094
1095 return 0;
1096}
1097
1098static void clk_core_init_rate_req(struct clk_core * const core,
1099 struct clk_rate_request *req)
1100{
1101 struct clk_core *parent;
1102
1103 if (WARN_ON(!core || !req))
1104 return;
1105
1106 parent = core->parent;
1107 if (parent) {
1108 req->best_parent_hw = parent->hw;
1109 req->best_parent_rate = parent->rate;
1110 } else {
1111 req->best_parent_hw = NULL;
1112 req->best_parent_rate = 0;
1113 }
1114}
1115
1116static bool clk_core_can_round(struct clk_core * const core)
1117{
1118 if (core->ops->determine_rate || core->ops->round_rate)
1119 return true;
1120
1121 return false;
1122}
1123
1124static int clk_core_round_rate_nolock(struct clk_core *core,
1125 struct clk_rate_request *req)
1126{
1127 lockdep_assert_held(&prepare_lock);
1128
1129 if (!core) {
1130 req->rate = 0;
1131 return 0;
1132 }
1133
1134 clk_core_init_rate_req(core, req);
1135
1136 if (clk_core_can_round(core))
1137 return clk_core_determine_round_nolock(core, req);
1138 else if (core->flags & CLK_SET_RATE_PARENT)
1139 return clk_core_round_rate_nolock(core->parent, req);
1140
1141 req->rate = core->rate;
1142 return 0;
1143}
1144
1145/**
1146 * __clk_determine_rate - get the closest rate actually supported by a clock
1147 * @hw: determine the rate of this clock
1148 * @req: target rate request
1149 *
1150 * Useful for clk_ops such as .set_rate and .determine_rate.
1151 */
1152int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1153{
1154 if (!hw) {
1155 req->rate = 0;
1156 return 0;
1157 }
1158
1159 return clk_core_round_rate_nolock(hw->core, req);
1160}
1161EXPORT_SYMBOL_GPL(__clk_determine_rate);
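
/*
 * A hedged sketch of a fixed divide-by-two clock whose .determine_rate
 * forwards the request upstream with __clk_determine_rate(); the foo_*
 * names are hypothetical:
 *
 *	static int foo_div2_determine_rate(struct clk_hw *hw,
 *					   struct clk_rate_request *req)
 *	{
 *		struct clk_rate_request parent_req = *req;
 *		int ret;
 *
 *		parent_req.rate = req->rate * 2;
 *		ret = __clk_determine_rate(clk_hw_get_parent(hw), &parent_req);
 *		if (ret)
 *			return ret;
 *
 *		req->best_parent_rate = parent_req.rate;
 *		req->rate = parent_req.rate / 2;
 *		return 0;
 *	}
 */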
1162
1163unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
1164{
1165 int ret;
1166 struct clk_rate_request req;
1167
1168 clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
1169 req.rate = rate;
1170
1171 ret = clk_core_round_rate_nolock(hw->core, &req);
1172 if (ret)
1173 return 0;
1174
1175 return req.rate;
1176}
1177EXPORT_SYMBOL_GPL(clk_hw_round_rate);
1178
1179/**
1180 * clk_round_rate - round the given rate for a clk
1181 * @clk: the clk for which we are rounding a rate
1182 * @rate: the rate which is to be rounded
1183 *
1184 * Takes in a rate as input and rounds it to a rate that the clk can actually
1185 * use, which is then returned. If clk doesn't support the round_rate
1186 * operation then the parent rate is returned.
1187 */
1188long clk_round_rate(struct clk *clk, unsigned long rate)
1189{
1190 struct clk_rate_request req;
1191 int ret;
1192
1193 if (!clk)
1194 return 0;
1195
1196 clk_prepare_lock();
1197
1198 if (clk->exclusive_count)
1199 clk_core_rate_unprotect(clk->core);
1200
1201 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
1202 req.rate = rate;
1203
1204 ret = clk_core_round_rate_nolock(clk->core, &req);
1205
1206 if (clk->exclusive_count)
1207 clk_core_rate_protect(clk->core);
1208
1209 clk_prepare_unlock();
1210
1211 if (ret)
1212 return ret;
1213
1214 return req.rate;
1215}
1216EXPORT_SYMBOL_GPL(clk_round_rate);
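
/*
 * A minimal consumer sketch: probe what the hardware can deliver before
 * committing; the 48 MHz target is hypothetical:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 *
 * No locks are held between the two calls, so the answer is advisory;
 * clk_set_rate() repeats the rounding internally.
 */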
1217
1218/**
1219 * __clk_notify - call clk notifier chain
1220 * @core: clk that is changing rate
1221 * @msg: clk notifier type (see include/linux/clk.h)
1222 * @old_rate: old clk rate
1223 * @new_rate: new clk rate
1224 *
1225 * Triggers a notifier call chain on the clk rate-change notification
1226 * for 'clk'. Passes a pointer to the struct clk and the previous
1227 * and current rates to the notifier callback. Intended to be called by
1228 * internal clock code only. Returns NOTIFY_DONE from the last driver
1229 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1230 * a driver returns that.
1231 */
1232static int __clk_notify(struct clk_core *core, unsigned long msg,
1233 unsigned long old_rate, unsigned long new_rate)
1234{
1235 struct clk_notifier *cn;
1236 struct clk_notifier_data cnd;
1237 int ret = NOTIFY_DONE;
1238
1239 cnd.old_rate = old_rate;
1240 cnd.new_rate = new_rate;
1241
1242 list_for_each_entry(cn, &clk_notifier_list, node) {
1243 if (cn->clk->core == core) {
1244 cnd.clk = cn->clk;
1245 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1246 &cnd);
1247 if (ret & NOTIFY_STOP_MASK)
1248 return ret;
1249 }
1250 }
1251
1252 return ret;
1253}
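
/*
 * Consumers subscribe to these messages with clk_notifier_register(). A
 * hedged sketch of a callback; the 100 MHz limit is hypothetical:
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (action == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
 *			return NOTIFY_STOP;
 *
 *		return NOTIFY_OK;
 *	}
 *
 * Returning NOTIFY_STOP (or NOTIFY_BAD) from a PRE_RATE_CHANGE handler
 * vetoes the change, as seen in __clk_speculate_rates() below.
 */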
1254
1255/**
1256 * __clk_recalc_accuracies - recalculate the accuracies of a clk subtree
1257 * @core: first clk in the subtree
1258 *
1259 * Walks the subtree of clks starting with clk and recalculates accuracies as
1260 * it goes. Note that if a clk does not implement the .recalc_accuracy
1261 * callback then it is assumed that the clock will take on the accuracy of its
1262 * parent.
1263 */
1264static void __clk_recalc_accuracies(struct clk_core *core)
1265{
1266 unsigned long parent_accuracy = 0;
1267 struct clk_core *child;
1268
1269 lockdep_assert_held(&prepare_lock);
1270
1271 if (core->parent)
1272 parent_accuracy = core->parent->accuracy;
1273
1274 if (core->ops->recalc_accuracy)
1275 core->accuracy = core->ops->recalc_accuracy(core->hw,
1276 parent_accuracy);
1277 else
1278 core->accuracy = parent_accuracy;
1279
1280 hlist_for_each_entry(child, &core->children, child_node)
1281 __clk_recalc_accuracies(child);
1282}
1283
1284static long clk_core_get_accuracy(struct clk_core *core)
1285{
1286 unsigned long accuracy;
1287
1288 clk_prepare_lock();
1289 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1290 __clk_recalc_accuracies(core);
1291
1292 accuracy = __clk_get_accuracy(core);
1293 clk_prepare_unlock();
1294
1295 return accuracy;
1296}
1297
1298/**
1299 * clk_get_accuracy - return the accuracy of clk
1300 * @clk: the clk whose accuracy is being returned
1301 *
1302 * Simply returns the cached accuracy of the clk, unless the
1303 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will
1304 * be issued.
1305 * If clk is NULL then returns 0.
1306 */
1307long clk_get_accuracy(struct clk *clk)
1308{
1309 if (!clk)
1310 return 0;
1311
1312 return clk_core_get_accuracy(clk->core);
1313}
1314EXPORT_SYMBOL_GPL(clk_get_accuracy);
1315
1316static unsigned long clk_recalc(struct clk_core *core,
1317 unsigned long parent_rate)
1318{
1319 unsigned long rate = parent_rate;
1320
1321 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1322 rate = core->ops->recalc_rate(core->hw, parent_rate);
1323 clk_pm_runtime_put(core);
1324 }
1325 return rate;
1326}
1327
1328/**
1329 * __clk_recalc_rates - recalculate the rates of a clk subtree
1330 * @core: first clk in the subtree
1331 * @msg: notification type (see include/linux/clk.h)
1332 *
1333 * Walks the subtree of clks starting with clk and recalculates rates as it
1334 * goes. Note that if a clk does not implement the .recalc_rate callback then
1335 * it is assumed that the clock will take on the rate of its parent.
1336 *
1337 * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1338 * if necessary.
1339 */
1340static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1341{
1342 unsigned long old_rate;
1343 unsigned long parent_rate = 0;
1344 struct clk_core *child;
1345
1346 lockdep_assert_held(&prepare_lock);
1347
1348 old_rate = core->rate;
1349
1350 if (core->parent)
1351 parent_rate = core->parent->rate;
1352
1353 core->rate = clk_recalc(core, parent_rate);
1354
1355 /*
1356 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1357 * & ABORT_RATE_CHANGE notifiers
1358 */
1359 if (core->notifier_count && msg)
1360 __clk_notify(core, msg, old_rate, core->rate);
1361
1362 hlist_for_each_entry(child, &core->children, child_node)
1363 __clk_recalc_rates(child, msg);
1364}
1365
1366static unsigned long clk_core_get_rate(struct clk_core *core)
1367{
1368 unsigned long rate;
1369
1370 clk_prepare_lock();
1371
1372 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1373 __clk_recalc_rates(core, 0);
1374
1375 rate = clk_core_get_rate_nolock(core);
1376 clk_prepare_unlock();
1377
1378 return rate;
1379}
1380
1381/**
1382 * clk_get_rate - return the rate of clk
1383 * @clk: the clk whose rate is being returned
1384 *
1385 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1386 * is set, which means a recalc_rate will be issued.
1387 * If clk is NULL then returns 0.
1388 */
1389unsigned long clk_get_rate(struct clk *clk)
1390{
1391 if (!clk)
1392 return 0;
1393
1394 return clk_core_get_rate(clk->core);
1395}
1396EXPORT_SYMBOL_GPL(clk_get_rate);
1397
1398static int clk_fetch_parent_index(struct clk_core *core,
1399 struct clk_core *parent)
1400{
1401 int i;
1402
1403 if (!parent)
1404 return -EINVAL;
1405
1406 for (i = 0; i < core->num_parents; i++)
1407 if (clk_core_get_parent_by_index(core, i) == parent)
1408 return i;
1409
1410 return -EINVAL;
1411}
1412
1413/*
1414 * Update the orphan status of @core and all its children.
1415 */
1416static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1417{
1418 struct clk_core *child;
1419
1420 core->orphan = is_orphan;
1421
1422 hlist_for_each_entry(child, &core->children, child_node)
1423 clk_core_update_orphan_status(child, is_orphan);
1424}
1425
1426static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1427{
1428 bool was_orphan = core->orphan;
1429
1430 hlist_del(&core->child_node);
1431
1432 if (new_parent) {
1433 bool becomes_orphan = new_parent->orphan;
1434
1435 /* avoid duplicate POST_RATE_CHANGE notifications */
1436 if (new_parent->new_child == core)
1437 new_parent->new_child = NULL;
1438
1439 hlist_add_head(&core->child_node, &new_parent->children);
1440
1441 if (was_orphan != becomes_orphan)
1442 clk_core_update_orphan_status(core, becomes_orphan);
1443 } else {
1444 hlist_add_head(&core->child_node, &clk_orphan_list);
1445 if (!was_orphan)
1446 clk_core_update_orphan_status(core, true);
1447 }
1448
1449 core->parent = new_parent;
1450}
1451
1452static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1453 struct clk_core *parent)
1454{
1455 unsigned long flags;
1456 struct clk_core *old_parent = core->parent;
1457
1458 /*
1459 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
1460 *
1461 * 2. Migrate prepare state between parents and prevent race with
1462 * clk_enable().
1463 *
1464 * If the clock is not prepared, then a race with
1465 * clk_enable/disable() is impossible since we already have the
1466 * prepare lock (future calls to clk_enable() need to be preceded by
1467 * a clk_prepare()).
1468 *
1469 * If the clock is prepared, migrate the prepared state to the new
1470 * parent and also protect against a race with clk_enable() by
1471 * forcing the clock and the new parent on. This ensures that all
1472 * future calls to clk_enable() are practically NOPs with respect to
1473 * hardware and software states.
1474 *
1475 * See also: Comment for clk_set_parent() below.
1476 */
1477
1478 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1479 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1480 clk_core_prepare_enable(old_parent);
1481 clk_core_prepare_enable(parent);
1482 }
1483
1484 /* migrate prepare count if > 0 */
1485 if (core->prepare_count) {
1486 clk_core_prepare_enable(parent);
1487 clk_core_enable_lock(core);
1488 }
1489
1490 /* update the clk tree topology */
1491 flags = clk_enable_lock();
1492 clk_reparent(core, parent);
1493 clk_enable_unlock(flags);
1494
1495 return old_parent;
1496}
1497
1498static void __clk_set_parent_after(struct clk_core *core,
1499 struct clk_core *parent,
1500 struct clk_core *old_parent)
1501{
1502 /*
1503 * Finish the migration of prepare state and undo the changes done
1504 * for preventing a race with clk_enable().
1505 */
1506 if (core->prepare_count) {
1507 clk_core_disable_lock(core);
1508 clk_core_disable_unprepare(old_parent);
1509 }
1510
1511 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1512 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1513 clk_core_disable_unprepare(parent);
1514 clk_core_disable_unprepare(old_parent);
1515 }
1516}
1517
1518static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1519 u8 p_index)
1520{
1521 unsigned long flags;
1522 int ret = 0;
1523 struct clk_core *old_parent;
1524
1525 old_parent = __clk_set_parent_before(core, parent);
1526
1527 trace_clk_set_parent(core, parent);
1528
1529 /* change clock input source */
1530 if (parent && core->ops->set_parent)
1531 ret = core->ops->set_parent(core->hw, p_index);
1532
1533 trace_clk_set_parent_complete(core, parent);
1534
1535 if (ret) {
1536 flags = clk_enable_lock();
1537 clk_reparent(core, old_parent);
1538 clk_enable_unlock(flags);
1539 __clk_set_parent_after(core, old_parent, parent);
1540
1541 return ret;
1542 }
1543
1544 __clk_set_parent_after(core, parent, old_parent);
1545
1546 return 0;
1547}
1548
1549/**
1550 * __clk_speculate_rates - speculate the rates of a clk subtree
1551 * @core: first clk in the subtree
1552 * @parent_rate: the "future" rate of clk's parent
1553 *
1554 * Walks the subtree of clks starting with clk, speculating rates as it
1555 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1556 *
1557 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1558 * pre-rate change notifications and returns early if no clks in the
1559 * subtree have subscribed to the notifications. Note that if a clk does not
1560 * implement the .recalc_rate callback then it is assumed that the clock will
1561 * take on the rate of its parent.
1562 */
1563static int __clk_speculate_rates(struct clk_core *core,
1564 unsigned long parent_rate)
1565{
1566 struct clk_core *child;
1567 unsigned long new_rate;
1568 int ret = NOTIFY_DONE;
1569
1570 lockdep_assert_held(&prepare_lock);
1571
1572 new_rate = clk_recalc(core, parent_rate);
1573
1574 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1575 if (core->notifier_count)
1576 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1577
1578 if (ret & NOTIFY_STOP_MASK) {
1579 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1580 __func__, core->name, ret);
1581 goto out;
1582 }
1583
1584 hlist_for_each_entry(child, &core->children, child_node) {
1585 ret = __clk_speculate_rates(child, new_rate);
1586 if (ret & NOTIFY_STOP_MASK)
1587 break;
1588 }
1589
1590out:
1591 return ret;
1592}
1593
1594static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1595 struct clk_core *new_parent, u8 p_index)
1596{
1597 struct clk_core *child;
1598
1599 core->new_rate = new_rate;
1600 core->new_parent = new_parent;
1601 core->new_parent_index = p_index;
1602 /* include clk in new parent's PRE_RATE_CHANGE notifications */
1603 core->new_child = NULL;
1604 if (new_parent && new_parent != core->parent)
1605 new_parent->new_child = core;
1606
1607 hlist_for_each_entry(child, &core->children, child_node) {
1608 child->new_rate = clk_recalc(child, new_rate);
1609 clk_calc_subtree(child, child->new_rate, NULL, 0);
1610 }
1611}
1612
1613/*
1614 * calculate the new rates, returning the topmost clock that has to be
1615 * changed.
1616 */
1617static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1618 unsigned long rate)
1619{
1620 struct clk_core *top = core;
1621 struct clk_core *old_parent, *parent;
1622 unsigned long best_parent_rate = 0;
1623 unsigned long new_rate;
1624 unsigned long min_rate;
1625 unsigned long max_rate;
1626 int p_index = 0;
1627 long ret;
1628
1629 /* sanity */
1630 if (IS_ERR_OR_NULL(core))
1631 return NULL;
1632
1633 /* save parent rate, if it exists */
1634 parent = old_parent = core->parent;
1635 if (parent)
1636 best_parent_rate = parent->rate;
1637
1638 clk_core_get_boundaries(core, &min_rate, &max_rate);
1639
1640 /* find the closest rate and parent clk/rate */
1641 if (clk_core_can_round(core)) {
1642 struct clk_rate_request req;
1643
1644 req.rate = rate;
1645 req.min_rate = min_rate;
1646 req.max_rate = max_rate;
1647
1648 clk_core_init_rate_req(core, &req);
1649
1650 ret = clk_core_determine_round_nolock(core, &req);
1651 if (ret < 0)
1652 return NULL;
1653
1654 best_parent_rate = req.best_parent_rate;
1655 new_rate = req.rate;
1656 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
1657
1658 if (new_rate < min_rate || new_rate > max_rate)
1659 return NULL;
1660 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1661 /* pass-through clock without adjustable parent */
1662 core->new_rate = core->rate;
1663 return NULL;
1664 } else {
1665 /* pass-through clock with adjustable parent */
1666 top = clk_calc_new_rates(parent, rate);
1667 new_rate = parent->new_rate;
1668 goto out;
1669 }
1670
1671 /* some clocks must be gated to change parent */
1672 if (parent != old_parent &&
1673 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1674 pr_debug("%s: %s not gated but wants to reparent\n",
1675 __func__, core->name);
1676 return NULL;
1677 }
1678
1679 /* try finding the new parent index */
1680 if (parent && core->num_parents > 1) {
1681 p_index = clk_fetch_parent_index(core, parent);
1682 if (p_index < 0) {
1683 pr_debug("%s: clk %s can not be parent of clk %s\n",
1684 __func__, parent->name, core->name);
1685 return NULL;
1686 }
1687 }
1688
1689 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1690 best_parent_rate != parent->rate)
1691 top = clk_calc_new_rates(parent, best_parent_rate);
1692
1693out:
1694 clk_calc_subtree(core, new_rate, parent, p_index);
1695
1696 return top;
1697}
1698
1699/*
1700 * Notify about rate changes in a subtree. Always walk down the whole tree
1701 * so that, in case of an error, we can walk down the whole tree again and
1702 * abort the change.
1703 */
1704static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1705 unsigned long event)
1706{
1707 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
1708 int ret = NOTIFY_DONE;
1709
1710 if (core->rate == core->new_rate)
1711 return NULL;
1712
1713 if (core->notifier_count) {
1714 ret = __clk_notify(core, event, core->rate, core->new_rate);
1715 if (ret & NOTIFY_STOP_MASK)
1716 fail_clk = core;
1717 }
1718
1719 hlist_for_each_entry(child, &core->children, child_node) {
1720 /* Skip children who will be reparented to another clock */
1721 if (child->new_parent && child->new_parent != core)
1722 continue;
1723 tmp_clk = clk_propagate_rate_change(child, event);
1724 if (tmp_clk)
1725 fail_clk = tmp_clk;
1726 }
1727
1728 /* handle the new child who might not be in core->children yet */
1729 if (core->new_child) {
1730 tmp_clk = clk_propagate_rate_change(core->new_child, event);
1731 if (tmp_clk)
1732 fail_clk = tmp_clk;
1733 }
1734
1735 return fail_clk;
1736}
1737
1738/*
1739 * walk down a subtree and set the new rates, notifying the rate
1740 * change on the way
1741 */
1742static void clk_change_rate(struct clk_core *core)
1743{
1744 struct clk_core *child;
1745 struct hlist_node *tmp;
1746 unsigned long old_rate;
1747 unsigned long best_parent_rate = 0;
1748 bool skip_set_rate = false;
1749 struct clk_core *old_parent;
1750 struct clk_core *parent = NULL;
1751
1752 old_rate = core->rate;
1753
1754 if (core->new_parent) {
1755 parent = core->new_parent;
1756 best_parent_rate = core->new_parent->rate;
1757 } else if (core->parent) {
1758 parent = core->parent;
1759 best_parent_rate = core->parent->rate;
1760 }
1761
1762 if (clk_pm_runtime_get(core))
1763 return;
1764
1765 if (core->flags & CLK_SET_RATE_UNGATE) {
1766 unsigned long flags;
1767
1768 clk_core_prepare(core);
1769 flags = clk_enable_lock();
1770 clk_core_enable(core);
1771 clk_enable_unlock(flags);
1772 }
1773
1774 if (core->new_parent && core->new_parent != core->parent) {
1775 old_parent = __clk_set_parent_before(core, core->new_parent);
1776 trace_clk_set_parent(core, core->new_parent);
1777
1778 if (core->ops->set_rate_and_parent) {
1779 skip_set_rate = true;
1780 core->ops->set_rate_and_parent(core->hw, core->new_rate,
1781 best_parent_rate,
1782 core->new_parent_index);
1783 } else if (core->ops->set_parent) {
1784 core->ops->set_parent(core->hw, core->new_parent_index);
1785 }
1786
1787 trace_clk_set_parent_complete(core, core->new_parent);
1788 __clk_set_parent_after(core, core->new_parent, old_parent);
1789 }
1790
1791 if (core->flags & CLK_OPS_PARENT_ENABLE)
1792 clk_core_prepare_enable(parent);
1793
1794 trace_clk_set_rate(core, core->new_rate);
1795
1796 if (!skip_set_rate && core->ops->set_rate)
1797 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
1798
1799 trace_clk_set_rate_complete(core, core->new_rate);
1800
1801 core->rate = clk_recalc(core, best_parent_rate);
1802
1803 if (core->flags & CLK_SET_RATE_UNGATE) {
1804 unsigned long flags;
1805
1806 flags = clk_enable_lock();
1807 clk_core_disable(core);
1808 clk_enable_unlock(flags);
1809 clk_core_unprepare(core);
1810 }
1811
1812 if (core->flags & CLK_OPS_PARENT_ENABLE)
1813 clk_core_disable_unprepare(parent);
1814
1815 if (core->notifier_count && old_rate != core->rate)
1816 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
1817
1818 if (core->flags & CLK_RECALC_NEW_RATES)
1819 (void)clk_calc_new_rates(core, core->new_rate);
1820
1821 /*
1822 * Use safe iteration, as change_rate can actually swap parents
1823 * for certain clock types.
1824 */
1825 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
1826 /* Skip children who will be reparented to another clock */
1827 if (child->new_parent && child->new_parent != core)
1828 continue;
1829 clk_change_rate(child);
1830 }
1831
1832 /* handle the new child who might not be in core->children yet */
1833 if (core->new_child)
1834 clk_change_rate(core->new_child);
1835
1836 clk_pm_runtime_put(core);
1837}
1838
1839static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
1840 unsigned long req_rate)
1841{
1842 int ret, cnt;
1843 struct clk_rate_request req;
1844
1845 lockdep_assert_held(&prepare_lock);
1846
1847 if (!core)
1848 return 0;
1849
1850 /* simulate what the rate would be if it could be freely set */
1851 cnt = clk_core_rate_nuke_protect(core);
1852 if (cnt < 0)
1853 return cnt;
1854
1855 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
1856 req.rate = req_rate;
1857
1858 ret = clk_core_round_rate_nolock(core, &req);
1859
1860 /* restore the protection */
1861 clk_core_rate_restore_protect(core, cnt);
1862
1863 return ret ? 0 : req.rate;
1864}
1865
1866static int clk_core_set_rate_nolock(struct clk_core *core,
1867 unsigned long req_rate)
1868{
1869 struct clk_core *top, *fail_clk;
1870 unsigned long rate;
1871 int ret = 0;
1872
1873 if (!core)
1874 return 0;
1875
1876 rate = clk_core_req_round_rate_nolock(core, req_rate);
1877
1878 /* bail early if nothing to do */
1879 if (rate == clk_core_get_rate_nolock(core))
1880 return 0;
1881
1882 /* fail on a direct rate set of a protected provider */
1883 if (clk_core_rate_is_protected(core))
1884 return -EBUSY;
1885
1886 if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
1887 return -EBUSY;
1888
1889 /* calculate new rates and get the topmost changed clock */
1890 top = clk_calc_new_rates(core, req_rate);
1891 if (!top)
1892 return -EINVAL;
1893
1894 ret = clk_pm_runtime_get(core);
1895 if (ret)
1896 return ret;
1897
1898 /* notify that we are about to change rates */
1899 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1900 if (fail_clk) {
1901 pr_debug("%s: failed to set %s rate\n", __func__,
1902 fail_clk->name);
1903 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1904 ret = -EBUSY;
1905 goto err;
1906 }
1907
1908 /* change the rates */
1909 clk_change_rate(top);
1910
1911 core->req_rate = req_rate;
1912err:
1913 clk_pm_runtime_put(core);
1914
1915 return ret;
1916}
1917
1918/**
1919 * clk_set_rate - specify a new rate for clk
1920 * @clk: the clk whose rate is being changed
1921 * @rate: the new rate for clk
1922 *
1923 * In the simplest case clk_set_rate will only adjust the rate of clk.
1924 *
1925 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1926 * propagate up to clk's parent; whether or not this happens depends on the
1927 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1928 * after calling .round_rate then upstream parent propagation is ignored. If
1929 * *parent_rate comes back with a new rate for clk's parent then we propagate
1930 * up to clk's parent and set its rate. Upward propagation will continue
1931 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1932 * .round_rate stops requesting changes to clk's parent_rate.
1933 *
1934 * Rate changes are accomplished via tree traversal that also recalculates the
1935 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1936 *
1937 * Returns 0 on success, a negative errno otherwise.
1938 */
1939int clk_set_rate(struct clk *clk, unsigned long rate)
1940{
1941 int ret;
1942
1943 if (!clk)
1944 return 0;
1945
1946 /* prevent racing with updates to the clock topology */
1947 clk_prepare_lock();
1948
1949 if (clk->exclusive_count)
1950 clk_core_rate_unprotect(clk->core);
1951
1952 ret = clk_core_set_rate_nolock(clk->core, rate);
1953
1954 if (clk->exclusive_count)
1955 clk_core_rate_protect(clk->core);
1956
1957 clk_prepare_unlock();
1958
1959 return ret;
1960}
1961EXPORT_SYMBOL_GPL(clk_set_rate);
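
/*
 * A minimal consumer sketch; the "pixel" clock and 74.25 MHz rate are
 * hypothetical:
 *
 *	struct clk *clk = devm_clk_get(&pdev->dev, "pixel");
 *
 *	if (!IS_ERR(clk))
 *		ret = clk_set_rate(clk, 74250000);
 *
 * Whether this re-rates only the leaf clock or walks up to a PLL depends on
 * CLK_SET_RATE_PARENT and the provider's .round_rate/.determine_rate
 * callbacks, as described above.
 */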
1962
1963/**
2011 * clk_set_rate_exclusive - specify a new rate and get exclusive control
1965 * @clk: the clk whose rate is being changed
1966 * @rate: the new rate for clk
1967 *
1968 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
1969 * within a critical section.
1970 *
1971 * This can be used initially to ensure that at least one consumer is
1972 * satisfied when several consumers are competing for exclusivity over the
1973 * same clock provider.
1974 *
1975 * The exclusivity is not applied if setting the rate failed.
1976 *
1977 * Calls to clk_rate_exclusive_get() should be balanced with calls to
1978 * clk_rate_exclusive_put().
1979 *
1980 * Returns 0 on success, a negative errno otherwise.
1981 */
1982int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
1983{
1984 int ret;
1985
1986 if (!clk)
1987 return 0;
1988
1989 /* prevent racing with updates to the clock topology */
1990 clk_prepare_lock();
1991
1992	/*
1993	 * On purpose, no temporary protection removal happens here: this
1994	 * function is meant to be used instead of clk_rate_exclusive_get(),
1995	 * i.e. before the consumer code path protects the clock provider.
1996	 */
1997
1998 ret = clk_core_set_rate_nolock(clk->core, rate);
1999 if (!ret) {
2000 clk_core_rate_protect(clk->core);
2001 clk->exclusive_count++;
2002 }
2003
2004 clk_prepare_unlock();
2005
2006 return ret;
2007}
2008EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
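
/*
 * A hedged sketch of claiming the rate at the moment it is set; the 25 MHz
 * rate is hypothetical and error handling is trimmed:
 *
 *	ret = clk_set_rate_exclusive(clk, 25000000);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_rate_exclusive_put(clk);
 */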
2009
2010/**
2011 * clk_set_rate_range - set a rate range for a clock source
2012 * @clk: clock source
2013 * @min: desired minimum clock rate in Hz, inclusive
2014 * @max: desired maximum clock rate in Hz, inclusive
2015 *
2016 * Returns success (0) or negative errno.
2017 */
2018int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2019{
2020 int ret = 0;
2021 unsigned long old_min, old_max, rate;
2022
2023 if (!clk)
2024 return 0;
2025
2026 if (min > max) {
2027 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2028 __func__, clk->core->name, clk->dev_id, clk->con_id,
2029 min, max);
2030 return -EINVAL;
2031 }
2032
2033 clk_prepare_lock();
2034
2035 if (clk->exclusive_count)
2036 clk_core_rate_unprotect(clk->core);
2037
2038 /* Save the current values in case we need to rollback the change */
2039 old_min = clk->min_rate;
2040 old_max = clk->max_rate;
2041 clk->min_rate = min;
2042 clk->max_rate = max;
2043
2044 rate = clk_core_get_rate_nolock(clk->core);
2045 if (rate < min || rate > max) {
2046 /*
2047 * FIXME:
2048		 * We are in a bit of trouble here, the current rate is outside
2049		 * the requested range. We are going to try to request an
2050		 * appropriate range boundary but there is a catch. It may fail
2051		 * for the usual reasons (clock broken, clock protected, etc) but
2052		 * also because:
2053 * - round_rate() was not favorable and fell on the wrong
2054 * side of the boundary
2055 * - the determine_rate() callback does not really check for
2056 * this corner case when determining the rate
2057 */
2058
2059 if (rate < min)
2060 rate = min;
2061 else
2062 rate = max;
2063
2064 ret = clk_core_set_rate_nolock(clk->core, rate);
2065 if (ret) {
2066 /* rollback the changes */
2067 clk->min_rate = old_min;
2068 clk->max_rate = old_max;
2069 }
2070 }
2071
2072 if (clk->exclusive_count)
2073 clk_core_rate_protect(clk->core);
2074
2075 clk_prepare_unlock();
2076
2077 return ret;
2078}
2079EXPORT_SYMBOL_GPL(clk_set_rate_range);
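
/*
 * A minimal consumer sketch; the 100-200 MHz window is hypothetical:
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 *
 * If the current rate already lies inside the window only the bounds are
 * recorded; otherwise the rate is pushed to the nearest boundary, subject
 * to the FIXME above.
 */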
2080
2081/**
2082 * clk_set_min_rate - set a minimum clock rate for a clock source
2083 * @clk: clock source
2084 * @rate: desired minimum clock rate in Hz, inclusive
2085 *
2086 * Returns success (0) or negative errno.
2087 */
2088int clk_set_min_rate(struct clk *clk, unsigned long rate)
2089{
2090 if (!clk)
2091 return 0;
2092
2093 return clk_set_rate_range(clk, rate, clk->max_rate);
2094}
2095EXPORT_SYMBOL_GPL(clk_set_min_rate);
2096
2097/**
2098 * clk_set_max_rate - set a maximum clock rate for a clock source
2099 * @clk: clock source
2100 * @rate: desired maximum clock rate in Hz, inclusive
2101 *
2102 * Returns success (0) or negative errno.
2103 */
2104int clk_set_max_rate(struct clk *clk, unsigned long rate)
2105{
2106 if (!clk)
2107 return 0;
2108
2109 return clk_set_rate_range(clk, clk->min_rate, rate);
2110}
2111EXPORT_SYMBOL_GPL(clk_set_max_rate);
2112
2113/**
2114 * clk_get_parent - return the parent of a clk
2115 * @clk: the clk whose parent gets returned
2116 *
2117 * Simply returns clk->parent. Returns NULL if clk is NULL.
2118 */
2119struct clk *clk_get_parent(struct clk *clk)
2120{
2121 struct clk *parent;
2122
2123 if (!clk)
2124 return NULL;
2125
2126 clk_prepare_lock();
2127 /* TODO: Create a per-user clk and change callers to call clk_put */
2128 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2129 clk_prepare_unlock();
2130
2131 return parent;
2132}
2133EXPORT_SYMBOL_GPL(clk_get_parent);
2134
2135static struct clk_core *__clk_init_parent(struct clk_core *core)
2136{
2137 u8 index = 0;
2138
2139 if (core->num_parents > 1 && core->ops->get_parent)
2140 index = core->ops->get_parent(core->hw);
2141
2142 return clk_core_get_parent_by_index(core, index);
2143}
2144
2145static void clk_core_reparent(struct clk_core *core,
2146 struct clk_core *new_parent)
2147{
2148 clk_reparent(core, new_parent);
2149 __clk_recalc_accuracies(core);
2150 __clk_recalc_rates(core, POST_RATE_CHANGE);
2151}
2152
2153void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2154{
2155 if (!hw)
2156 return;
2157
2158 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2159}
2160
2161/**
2162 * clk_has_parent - check if a clock is a possible parent for another
2163 * @clk: clock source
2164 * @parent: parent clock source
2165 *
2166 * This function can be used in drivers that need to check that a clock can be
2167 * the parent of another without actually changing the parent.
2168 *
2169 * Returns true if @parent is a possible parent for @clk, false otherwise.
2170 */
2171bool clk_has_parent(struct clk *clk, struct clk *parent)
2172{
2173 struct clk_core *core, *parent_core;
2174 unsigned int i;
2175
2176 /* NULL clocks should be nops, so return success if either is NULL. */
2177 if (!clk || !parent)
2178 return true;
2179
2180 core = clk->core;
2181 parent_core = parent->core;
2182
2183	/* Optimize for the case where the new parent is already the parent. */
2184 if (core->parent == parent_core)
2185 return true;
2186
2187 for (i = 0; i < core->num_parents; i++)
2188 if (strcmp(core->parent_names[i], parent_core->name) == 0)
2189 return true;
2190
2191 return false;
2192}
2193EXPORT_SYMBOL_GPL(clk_has_parent);
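
/*
 * A minimal consumer sketch: validate a candidate parent before switching;
 * the mux and pll_b handles are hypothetical:
 *
 *	if (clk_has_parent(mux, pll_b))
 *		ret = clk_set_parent(mux, pll_b);
 */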
2194
2195static int clk_core_set_parent_nolock(struct clk_core *core,
2196 struct clk_core *parent)
2197{
2198 int ret = 0;
2199 int p_index = 0;
2200 unsigned long p_rate = 0;
2201
2202 lockdep_assert_held(&prepare_lock);
2203
2204 if (!core)
2205 return 0;
2206
2207 if (core->parent == parent)
2208 return 0;
2209
2210	/* verify ops for multi-parent clks */
2211 if (core->num_parents > 1 && !core->ops->set_parent)
2212 return -EPERM;
2213
2214 /* check that we are allowed to re-parent if the clock is in use */
2215 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2216 return -EBUSY;
2217
2218 if (clk_core_rate_is_protected(core))
2219 return -EBUSY;
2220
2221 /* try finding the new parent index */
2222 if (parent) {
2223 p_index = clk_fetch_parent_index(core, parent);
2224 if (p_index < 0) {
2225 pr_debug("%s: clk %s can not be parent of clk %s\n",
2226 __func__, parent->name, core->name);
2227 return p_index;
2228 }
2229 p_rate = parent->rate;
2230 }
2231
2232 ret = clk_pm_runtime_get(core);
2233 if (ret)
2234 return ret;
2235
2236 /* propagate PRE_RATE_CHANGE notifications */
2237 ret = __clk_speculate_rates(core, p_rate);
2238
2239 /* abort if a driver objects */
2240 if (ret & NOTIFY_STOP_MASK)
2241 goto runtime_put;
2242
2243 /* do the re-parent */
2244 ret = __clk_set_parent(core, parent, p_index);
2245
2246	/* propagate rate and accuracy recalculation accordingly */
2247 if (ret) {
2248 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
2249 } else {
2250 __clk_recalc_rates(core, POST_RATE_CHANGE);
2251 __clk_recalc_accuracies(core);
2252 }
2253
2254runtime_put:
2255 clk_pm_runtime_put(core);
2256
2257 return ret;
2258}
2259
2260/**
2261 * clk_set_parent - switch the parent of a mux clk
2262 * @clk: the mux clk whose input we are switching
2263 * @parent: the new input to clk
2264 *
2265 * Re-parent clk to use parent as its new input source. If clk is in
2266 * prepared state, the clk will get enabled for the duration of this call. If
2267 * that's not acceptable for a specific clk (e.g. the consumer can't handle
2268 * that, the reparenting is glitchy in hardware, etc), use the
2269 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2270 *
2271 * After successfully changing clk's parent clk_set_parent will update the
2272 * clk topology, sysfs topology and propagate rate recalculation via
2273 * __clk_recalc_rates.
2274 *
2275 * Returns 0 on success, a negative errno otherwise.
2276 */
2277int clk_set_parent(struct clk *clk, struct clk *parent)
2278{
2279 int ret;
2280
2281 if (!clk)
2282 return 0;
2283
2284 clk_prepare_lock();
2285
2286 if (clk->exclusive_count)
2287 clk_core_rate_unprotect(clk->core);
2288
2289 ret = clk_core_set_parent_nolock(clk->core,
2290 parent ? parent->core : NULL);
2291
2292 if (clk->exclusive_count)
2293 clk_core_rate_protect(clk->core);
2294
2295 clk_prepare_unlock();
2296
2297 return ret;
2298}
2299EXPORT_SYMBOL_GPL(clk_set_parent);
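
/*
 * A minimal consumer sketch of re-parenting a mux; the clock names are
 * hypothetical and error handling is trimmed:
 *
 *	struct clk *mux = devm_clk_get(dev, "audio_mux");
 *	struct clk *osc = devm_clk_get(dev, "osc24m");
 *
 *	if (!IS_ERR(mux) && !IS_ERR(osc))
 *		ret = clk_set_parent(mux, osc);
 */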
2300
2301static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2302{
2303 int ret = -EINVAL;
2304
2305 lockdep_assert_held(&prepare_lock);
2306
2307 if (!core)
2308 return 0;
2309
2310 if (clk_core_rate_is_protected(core))
2311 return -EBUSY;
2312
2313 trace_clk_set_phase(core, degrees);
2314
2315 if (core->ops->set_phase) {
2316 ret = core->ops->set_phase(core->hw, degrees);
2317 if (!ret)
2318 core->phase = degrees;
2319 }
2320
2321 trace_clk_set_phase_complete(core, degrees);
2322
2323 return ret;
2324}
2325
2326/**
2327 * clk_set_phase - adjust the phase shift of a clock signal
2328 * @clk: clock signal source
2329 * @degrees: number of degrees the signal is shifted
2330 *
2331 * Shifts the phase of a clock signal by the specified
2332 * degrees. Returns 0 on success, a negative errno otherwise.
2333 *
2334 * This function makes no distinction about the input or reference
2335 * signal that we adjust the clock signal phase against. For example, with
2336 * phase-locked-loop clock signal generators we may shift phase with
2337 * respect to the feedback clock signal input, but in other cases the
2338 * clock phase may be shifted with respect to some other, unspecified
2339 * signal.
2340 *
2341 * Additionally the concept of phase shift does not propagate through
2342 * the clock tree hierarchy, which sets it apart from clock rates and
2343 * clock accuracy. A parent clock phase attribute does not have an
2344 * impact on the phase attribute of a child clock.
2345 */
2346int clk_set_phase(struct clk *clk, int degrees)
2347{
2348 int ret;
2349
2350 if (!clk)
2351 return 0;
2352
2353 /* sanity check degrees */
2354 degrees %= 360;
2355 if (degrees < 0)
2356 degrees += 360;
2357
2358 clk_prepare_lock();
2359
2360 if (clk->exclusive_count)
2361 clk_core_rate_unprotect(clk->core);
2362
2363 ret = clk_core_set_phase_nolock(clk->core, degrees);
2364
2365 if (clk->exclusive_count)
2366 clk_core_rate_protect(clk->core);
2367
2368 clk_prepare_unlock();
2369
2370 return ret;
2371}
2372EXPORT_SYMBOL_GPL(clk_set_phase);
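
/*
 * A minimal consumer sketch: shift a sampling clock by a quarter period;
 * the sample_clk handle and the 90 degree value are hypothetical:
 *
 *	ret = clk_set_phase(sample_clk, 90);
 *
 * On success the new shift can be read back with clk_get_phase().
 */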
2373
2374static int clk_core_get_phase(struct clk_core *core)
2375{
2376 int ret;
2377
2378 clk_prepare_lock();
2379 /* Always try to update cached phase if possible */
2380 if (core->ops->get_phase)
2381 core->phase = core->ops->get_phase(core->hw);
2382 ret = core->phase;
2383 clk_prepare_unlock();
2384
2385 return ret;
2386}
2387
2388/**
2389 * clk_get_phase - return the phase shift of a clock signal
2390 * @clk: clock signal source
2391 *
2392 * Returns the phase shift of a clock node in degrees, otherwise returns
2393 * a negative errno.
2394 */
2395int clk_get_phase(struct clk *clk)
2396{
2397 if (!clk)
2398 return 0;
2399
2400 return clk_core_get_phase(clk->core);
2401}
2402EXPORT_SYMBOL_GPL(clk_get_phase);
2403
2404/**
2405 * clk_is_match - check if two clk's point to the same hardware clock
2406 * @p: clk compared against q
2407 * @q: clk compared against p
2408 *
2409 * Returns true if the two struct clk pointers both point to the same hardware
2410 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2411 * share the same struct clk_core object.
2412 *
2413 * Returns false otherwise. Note that two NULL clks are treated as matching.
2414 */
2415bool clk_is_match(const struct clk *p, const struct clk *q)
2416{
2417 /* trivial case: identical struct clk's or both NULL */
2418 if (p == q)
2419 return true;
2420
2421 /* true if clk->core pointers match. Avoid dereferencing garbage */
2422 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2423 if (p->core == q->core)
2424 return true;
2425
2426 return false;
2427}
2428EXPORT_SYMBOL_GPL(clk_is_match);
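
/*
 * A minimal sketch: two handles obtained independently still match when
 * they wrap the same hardware clock; the "core" name is hypothetical:
 *
 *	struct clk *a = clk_get(dev, "core");
 *	struct clk *b = clk_get(dev, "core");
 *
 *	WARN_ON(!clk_is_match(a, b));
 *
 * Here a != b as pointers, but both share one struct clk_core.
 */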
2429
2430/*** debugfs support ***/
2431
2432#ifdef CONFIG_DEBUG_FS
2433#include <linux/debugfs.h>
2434
2435static struct dentry *rootdir;
2436static int inited = 0;
2437static DEFINE_MUTEX(clk_debug_lock);
2438static HLIST_HEAD(clk_debug_list);
2439
2440static struct hlist_head *all_lists[] = {
2441 &clk_root_list,
2442 &clk_orphan_list,
2443 NULL,
2444};
2445
2446static struct hlist_head *orphan_list[] = {
2447 &clk_orphan_list,
2448 NULL,
2449};
2450
2451static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2452 int level)
2453{
2454 if (!c)
2455 return;
2456
2457 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
2458 level * 3 + 1, "",
2459 30 - level * 3, c->name,
2460 c->enable_count, c->prepare_count, c->protect_count,
2461 clk_core_get_rate(c), clk_core_get_accuracy(c),
2462 clk_core_get_phase(c));
2463}
2464
2465static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2466 int level)
2467{
2468 struct clk_core *child;
2469
2470 if (!c)
2471 return;
2472
2473 clk_summary_show_one(s, c, level);
2474
2475 hlist_for_each_entry(child, &c->children, child_node)
2476 clk_summary_show_subtree(s, child, level + 1);
2477}
2478
2479static int clk_summary_show(struct seq_file *s, void *data)
2480{
2481 struct clk_core *c;
2482 struct hlist_head **lists = (struct hlist_head **)s->private;
2483
2484 seq_puts(s, " enable prepare protect \n");
2485 seq_puts(s, " clock count count count rate accuracy phase\n");
2486 seq_puts(s, "----------------------------------------------------------------------------------------\n");
2487
2488 clk_prepare_lock();
2489
2490 for (; *lists; lists++)
2491 hlist_for_each_entry(c, *lists, child_node)
2492 clk_summary_show_subtree(s, c, 0);
2493
2494 clk_prepare_unlock();
2495
2496 return 0;
2497}
2498DEFINE_SHOW_ATTRIBUTE(clk_summary);
2499
2500static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2501{
2502 if (!c)
2503 return;
2504
2505 /* This should be JSON format, i.e. elements separated with a comma */
2506 seq_printf(s, "\"%s\": { ", c->name);
2507 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2508 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
2509 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
2510 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2511 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
2512 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
2513}
2514
2515static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2516{
2517 struct clk_core *child;
2518
2519 if (!c)
2520 return;
2521
2522 clk_dump_one(s, c, level);
2523
2524 hlist_for_each_entry(child, &c->children, child_node) {
2525 seq_putc(s, ',');
2526 clk_dump_subtree(s, child, level + 1);
2527 }
2528
2529 seq_putc(s, '}');
2530}
2531
2532static int clk_dump_show(struct seq_file *s, void *data)
2533{
2534 struct clk_core *c;
2535 bool first_node = true;
2536 struct hlist_head **lists = (struct hlist_head **)s->private;
2537
2538 seq_putc(s, '{');
2539 clk_prepare_lock();
2540
2541 for (; *lists; lists++) {
2542 hlist_for_each_entry(c, *lists, child_node) {
2543 if (!first_node)
2544 seq_putc(s, ',');
2545 first_node = false;
2546 clk_dump_subtree(s, c, 0);
2547 }
2548 }
2549
2550 clk_prepare_unlock();
2551
2552 seq_puts(s, "}\n");
2553 return 0;
2554}
2555DEFINE_SHOW_ATTRIBUTE(clk_dump);
2556
2557static const struct {
2558 unsigned long flag;
2559 const char *name;
2560} clk_flags[] = {
2561#define ENTRY(f) { f, __stringify(f) }
2562 ENTRY(CLK_SET_RATE_GATE),
2563 ENTRY(CLK_SET_PARENT_GATE),
2564 ENTRY(CLK_SET_RATE_PARENT),
2565 ENTRY(CLK_IGNORE_UNUSED),
2566 ENTRY(CLK_IS_BASIC),
2567 ENTRY(CLK_GET_RATE_NOCACHE),
2568 ENTRY(CLK_SET_RATE_NO_REPARENT),
2569 ENTRY(CLK_GET_ACCURACY_NOCACHE),
2570 ENTRY(CLK_RECALC_NEW_RATES),
2571 ENTRY(CLK_SET_RATE_UNGATE),
2572 ENTRY(CLK_IS_CRITICAL),
2573 ENTRY(CLK_OPS_PARENT_ENABLE),
2574#undef ENTRY
2575};
2576
2577static int clk_flags_show(struct seq_file *s, void *data)
2578{
2579 struct clk_core *core = s->private;
2580 unsigned long flags = core->flags;
2581 unsigned int i;
2582
2583 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
2584 if (flags & clk_flags[i].flag) {
2585 seq_printf(s, "%s\n", clk_flags[i].name);
2586 flags &= ~clk_flags[i].flag;
2587 }
2588 }
2589 if (flags) {
2590 /* Unknown flags */
2591 seq_printf(s, "0x%lx\n", flags);
2592 }
2593
2594 return 0;
2595}
2596DEFINE_SHOW_ATTRIBUTE(clk_flags);
2597
2598static int possible_parents_show(struct seq_file *s, void *data)
2599{
2600 struct clk_core *core = s->private;
2601 int i;
2602
2603 for (i = 0; i < core->num_parents - 1; i++)
2604 seq_printf(s, "%s ", core->parent_names[i]);
2605
2606 seq_printf(s, "%s\n", core->parent_names[i]);
2607
2608 return 0;
2609}
2610DEFINE_SHOW_ATTRIBUTE(possible_parents);
2611
2612static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
2613{
2614 struct dentry *d;
2615 int ret = -ENOMEM;
2616
2617 if (!core || !pdentry) {
2618 ret = -EINVAL;
2619 goto out;
2620 }
2621
2622 d = debugfs_create_dir(core->name, pdentry);
2623 if (!d)
2624 goto out;
2625
2626 core->dentry = d;
2627
2628 d = debugfs_create_ulong("clk_rate", 0444, core->dentry, &core->rate);
2629 if (!d)
2630 goto err_out;
2631
2632 d = debugfs_create_ulong("clk_accuracy", 0444, core->dentry,
2633 &core->accuracy);
2634 if (!d)
2635 goto err_out;
2636
2637 d = debugfs_create_u32("clk_phase", 0444, core->dentry, &core->phase);
2638 if (!d)
2639 goto err_out;
2640
2641 d = debugfs_create_file("clk_flags", 0444, core->dentry, core,
2642 &clk_flags_fops);
2643 if (!d)
2644 goto err_out;
2645
2646 d = debugfs_create_u32("clk_prepare_count", 0444, core->dentry,
2647 &core->prepare_count);
2648 if (!d)
2649 goto err_out;
2650
2651 d = debugfs_create_u32("clk_enable_count", 0444, core->dentry,
2652 &core->enable_count);
2653 if (!d)
2654 goto err_out;
2655
2656 d = debugfs_create_u32("clk_protect_count", 0444, core->dentry,
2657 &core->protect_count);
2658 if (!d)
2659 goto err_out;
2660
2661 d = debugfs_create_u32("clk_notifier_count", 0444, core->dentry,
2662 &core->notifier_count);
2663 if (!d)
2664 goto err_out;
2665
2666 if (core->num_parents > 1) {
2667 d = debugfs_create_file("clk_possible_parents", 0444,
2668 core->dentry, core, &possible_parents_fops);
2669 if (!d)
2670 goto err_out;
2671 }
2672
2673 if (core->ops->debug_init) {
2674 ret = core->ops->debug_init(core->hw, core->dentry);
2675 if (ret)
2676 goto err_out;
2677 }
2678
2679 ret = 0;
2680 goto out;
2681
2682err_out:
2683 debugfs_remove_recursive(core->dentry);
2684 core->dentry = NULL;
2685out:
2686 return ret;
2687}
2688
2689/**
2690 * clk_debug_register - add a clk node to the debugfs clk directory
2691 * @core: the clk being added to the debugfs clk directory
2692 *
2693 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2694 * initialized. Otherwise the clk is only queued on clk_debug_list and its
2695 * debugfs node is created lazily by clk_debug_init as part of a late_initcall.
2696 */
2697static int clk_debug_register(struct clk_core *core)
2698{
2699 int ret = 0;
2700
2701 mutex_lock(&clk_debug_lock);
2702 hlist_add_head(&core->debug_node, &clk_debug_list);
2703 if (inited)
2704 ret = clk_debug_create_one(core, rootdir);
2705 mutex_unlock(&clk_debug_lock);
2706
2707 return ret;
2708}
2709
2710/**
2711 * clk_debug_unregister - remove a clk node from the debugfs clk directory
2712 * @core: the clk being removed from the debugfs clk directory
2713 *
2714 * Dynamically removes a clk and all its child nodes from the
2715 * debugfs clk directory if clk->dentry points to debugfs created by
2716 * clk_debug_register in __clk_core_init.
2717 */
2718static void clk_debug_unregister(struct clk_core *core)
2719{
2720 mutex_lock(&clk_debug_lock);
2721 hlist_del_init(&core->debug_node);
2722 debugfs_remove_recursive(core->dentry);
2723 core->dentry = NULL;
2724 mutex_unlock(&clk_debug_lock);
2725}
2726
2727struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
2728 void *data, const struct file_operations *fops)
2729{
2730 struct dentry *d = NULL;
2731
2732 if (hw->core->dentry)
2733 d = debugfs_create_file(name, mode, hw->core->dentry, data,
2734 fops);
2735
2736 return d;
2737}
2738EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
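/*
 * Illustrative sketch (not part of the original file; the foo_* names are
 * hypothetical): a provider can attach extra per-clock debugfs files after
 * registration, reusing DEFINE_SHOW_ATTRIBUTE() just as the framework does
 * above:
 *
 *	static int foo_state_show(struct seq_file *s, void *unused)
 *	{
 *		struct clk_hw *hw = s->private;
 *
 *		seq_printf(s, "%s\n", clk_hw_get_name(hw));
 *		return 0;
 *	}
 *	DEFINE_SHOW_ATTRIBUTE(foo_state);
 *
 *	d = clk_debugfs_add_file(hw, "foo_state", 0444, hw, &foo_state_fops);
 */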
2739
2740/**
2741 * clk_debug_init - lazily populate the debugfs clk directory
2742 *
2743 * clks are often initialized very early during boot before memory can be
2744 * dynamically allocated and well before debugfs is set up. This function
2745 * populates the debugfs clk directory once at boot-time when we know that
2746 * debugfs is set up. It should only be called once at boot-time; all other
2747 * clks added dynamically will be registered via clk_debug_register.
2748 */
2749static int __init clk_debug_init(void)
2750{
2751 struct clk_core *core;
2752 struct dentry *d;
2753
2754 rootdir = debugfs_create_dir("clk", NULL);
2755
2756 if (!rootdir)
2757 return -ENOMEM;
2758
2759 d = debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
2760 &clk_summary_fops);
2761 if (!d)
2762 return -ENOMEM;
2763
2764 d = debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
2765 &clk_dump_fops);
2766 if (!d)
2767 return -ENOMEM;
2768
2769 d = debugfs_create_file("clk_orphan_summary", 0444, rootdir,
2770 &orphan_list, &clk_summary_fops);
2771 if (!d)
2772 return -ENOMEM;
2773
2774 d = debugfs_create_file("clk_orphan_dump", 0444, rootdir,
2775 &orphan_list, &clk_dump_fops);
2776 if (!d)
2777 return -ENOMEM;
2778
2779 mutex_lock(&clk_debug_lock);
2780 hlist_for_each_entry(core, &clk_debug_list, debug_node)
2781 clk_debug_create_one(core, rootdir);
2782
2783 inited = 1;
2784 mutex_unlock(&clk_debug_lock);
2785
2786 return 0;
2787}
2788late_initcall(clk_debug_init);
2789#else
2790static inline int clk_debug_register(struct clk_core *core) { return 0; }
2791static inline void clk_debug_reparent(struct clk_core *core,
2792 struct clk_core *new_parent)
2793{
2794}
2795static inline void clk_debug_unregister(struct clk_core *core)
2796{
2797}
2798#endif
2799
2800/**
2801 * __clk_core_init - initialize the data structures in a struct clk_core
2802 * @core: clk_core being initialized
2803 *
2804 * Initializes the lists in struct clk_core, queries the hardware for the
2805 * parent and rate and sets them both.
2806 */
2807static int __clk_core_init(struct clk_core *core)
2808{
2809 int i, ret;
2810 struct clk_core *orphan;
2811 struct hlist_node *tmp2;
2812 unsigned long rate;
2813
2814 if (!core)
2815 return -EINVAL;
2816
2817 clk_prepare_lock();
2818
2819 ret = clk_pm_runtime_get(core);
2820 if (ret)
2821 goto unlock;
2822
2823 /* check to see if a clock with this name is already registered */
2824 if (clk_core_lookup(core->name)) {
2825 pr_debug("%s: clk %s already initialized\n",
2826 __func__, core->name);
2827 ret = -EEXIST;
2828 goto out;
2829 }
2830
2831 /* check that clk_ops are sane. See Documentation/clk.txt */
2832 if (core->ops->set_rate &&
2833 !((core->ops->round_rate || core->ops->determine_rate) &&
2834 core->ops->recalc_rate)) {
2835 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
2836 __func__, core->name);
2837 ret = -EINVAL;
2838 goto out;
2839 }
2840
2841 if (core->ops->set_parent && !core->ops->get_parent) {
2842 pr_err("%s: %s must implement .get_parent & .set_parent\n",
2843 __func__, core->name);
2844 ret = -EINVAL;
2845 goto out;
2846 }
2847
2848 if (core->num_parents > 1 && !core->ops->get_parent) {
2849		pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
2850 __func__, core->name);
2851 ret = -EINVAL;
2852 goto out;
2853 }
2854
2855 if (core->ops->set_rate_and_parent &&
2856 !(core->ops->set_parent && core->ops->set_rate)) {
2857 pr_err("%s: %s must implement .set_parent & .set_rate\n",
2858 __func__, core->name);
2859 ret = -EINVAL;
2860 goto out;
2861 }
2862
2863 /* throw a WARN if any entries in parent_names are NULL */
2864 for (i = 0; i < core->num_parents; i++)
2865 WARN(!core->parent_names[i],
2866 "%s: invalid NULL in %s's .parent_names\n",
2867 __func__, core->name);
2868
2869 core->parent = __clk_init_parent(core);
2870
2871 /*
2872 * Populate core->parent if parent has already been clk_core_init'd. If
2873 * parent has not yet been clk_core_init'd then place clk in the orphan
2874 * list. If clk doesn't have any parents then place it in the root
2875 * clk list.
2876 *
2877	 * Every time a new clk is clk_init'd, we walk the list of orphan
2878 * clocks and re-parent any that are children of the clock currently
2879 * being clk_init'd.
2880 */
2881 if (core->parent) {
2882 hlist_add_head(&core->child_node,
2883 &core->parent->children);
2884 core->orphan = core->parent->orphan;
2885 } else if (!core->num_parents) {
2886 hlist_add_head(&core->child_node, &clk_root_list);
2887 core->orphan = false;
2888 } else {
2889 hlist_add_head(&core->child_node, &clk_orphan_list);
2890 core->orphan = true;
2891 }
2892
2893 /*
2894 * optional platform-specific magic
2895 *
2896 * The .init callback is not used by any of the basic clock types, but
2897 * exists for weird hardware that must perform initialization magic.
2898 * Please consider other ways of solving initialization problems before
2899 * using this callback, as its use is discouraged.
2900 */
2901 if (core->ops->init)
2902 core->ops->init(core->hw);
2903
2904 /*
2905 * Set clk's accuracy. The preferred method is to use
2906 * .recalc_accuracy. For simple clocks and lazy developers the default
2907 * fallback is to use the parent's accuracy. If a clock doesn't have a
2908 * parent (or is orphaned) then accuracy is set to zero (perfect
2909 * clock).
2910 */
2911 if (core->ops->recalc_accuracy)
2912 core->accuracy = core->ops->recalc_accuracy(core->hw,
2913 __clk_get_accuracy(core->parent));
2914 else if (core->parent)
2915 core->accuracy = core->parent->accuracy;
2916 else
2917 core->accuracy = 0;
2918
2919 /*
2920 * Set clk's phase.
2921	 * Since a phase is by definition relative to its parent, query the
2922	 * hardware for the current phase, or else assume the clock is in phase.
2923 */
2924 if (core->ops->get_phase)
2925 core->phase = core->ops->get_phase(core->hw);
2926 else
2927 core->phase = 0;
2928
2929 /*
2930 * Set clk's rate. The preferred method is to use .recalc_rate. For
2931 * simple clocks and lazy developers the default fallback is to use the
2932 * parent's rate. If a clock doesn't have a parent (or is orphaned)
2933 * then rate is set to zero.
2934 */
2935 if (core->ops->recalc_rate)
2936 rate = core->ops->recalc_rate(core->hw,
2937 clk_core_get_rate_nolock(core->parent));
2938 else if (core->parent)
2939 rate = core->parent->rate;
2940 else
2941 rate = 0;
2942 core->rate = core->req_rate = rate;
2943
2944 /*
2945 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
2946 * don't get accidentally disabled when walking the orphan tree and
2947 * reparenting clocks
2948 */
2949 if (core->flags & CLK_IS_CRITICAL) {
2950 unsigned long flags;
2951
2952 clk_core_prepare(core);
2953
2954 flags = clk_enable_lock();
2955 clk_core_enable(core);
2956 clk_enable_unlock(flags);
2957 }
2958
2959 /*
2960	 * Walk the list of orphan clocks and reparent any that have newly found a
2961 * parent.
2962 */
2963 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
2964 struct clk_core *parent = __clk_init_parent(orphan);
2965
2966 /*
2967		 * We need to use __clk_set_parent_before() and _after() to
2968		 * properly migrate any prepare/enable count of the orphan
2969 * clock. This is important for CLK_IS_CRITICAL clocks, which
2970 * are enabled during init but might not have a parent yet.
2971 */
2972 if (parent) {
2973 /* update the clk tree topology */
2974 __clk_set_parent_before(orphan, parent);
2975 __clk_set_parent_after(orphan, parent, NULL);
2976 __clk_recalc_accuracies(orphan);
2977 __clk_recalc_rates(orphan, 0);
2978 }
2979 }
2980
2981 kref_init(&core->ref);
2982out:
2983 clk_pm_runtime_put(core);
2984unlock:
2985 clk_prepare_unlock();
2986
2987 if (!ret)
2988 clk_debug_register(core);
2989
2990 return ret;
2991}
2992
2993struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2994 const char *con_id)
2995{
2996 struct clk *clk;
2997
2998 /* This is to allow this function to be chained to others */
2999 if (IS_ERR_OR_NULL(hw))
3000 return ERR_CAST(hw);
3001
3002 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3003 if (!clk)
3004 return ERR_PTR(-ENOMEM);
3005
3006 clk->core = hw->core;
3007 clk->dev_id = dev_id;
3008 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3009 clk->max_rate = ULONG_MAX;
3010
3011 clk_prepare_lock();
3012 hlist_add_head(&clk->clks_node, &hw->core->clks);
3013 clk_prepare_unlock();
3014
3015 return clk;
3016}
3017
3018void __clk_free_clk(struct clk *clk)
3019{
3020 clk_prepare_lock();
3021 hlist_del(&clk->clks_node);
3022 clk_prepare_unlock();
3023
3024 kfree_const(clk->con_id);
3025 kfree(clk);
3026}
3027
3028/**
3029 * clk_register - allocate a new clock, register it and return an opaque cookie
3030 * @dev: device that is registering this clock
3031 * @hw: link to hardware-specific clock data
3032 *
3033 * clk_register is the primary interface for populating the clock tree with new
3034 * clock nodes. It returns a pointer to the newly allocated struct clk which
3035 * cannot be dereferenced by driver code but may be used in conjunction with the
3036 * rest of the clock API. In the event of an error clk_register will return an
3037 * error code; drivers must test for an error code after calling clk_register.
3038 */
3039struct clk *clk_register(struct device *dev, struct clk_hw *hw)
3040{
3041 int i, ret;
3042 struct clk_core *core;
3043
3044 core = kzalloc(sizeof(*core), GFP_KERNEL);
3045 if (!core) {
3046 ret = -ENOMEM;
3047 goto fail_out;
3048 }
3049
3050 core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
3051 if (!core->name) {
3052 ret = -ENOMEM;
3053 goto fail_name;
3054 }
3055
3056 if (WARN_ON(!hw->init->ops)) {
3057 ret = -EINVAL;
3058 goto fail_ops;
3059 }
3060 core->ops = hw->init->ops;
3061
3062 if (dev && pm_runtime_enabled(dev))
3063 core->dev = dev;
3064 if (dev && dev->driver)
3065 core->owner = dev->driver->owner;
3066 core->hw = hw;
3067 core->flags = hw->init->flags;
3068 core->num_parents = hw->init->num_parents;
3069 core->min_rate = 0;
3070 core->max_rate = ULONG_MAX;
3071 hw->core = core;
3072
3073 /* allocate local copy in case parent_names is __initdata */
3074 core->parent_names = kcalloc(core->num_parents, sizeof(char *),
3075 GFP_KERNEL);
3076
3077 if (!core->parent_names) {
3078 ret = -ENOMEM;
3079 goto fail_parent_names;
3080 }
3081
3083 /* copy each string name in case parent_names is __initdata */
3084 for (i = 0; i < core->num_parents; i++) {
3085 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
3086 GFP_KERNEL);
3087 if (!core->parent_names[i]) {
3088 ret = -ENOMEM;
3089 goto fail_parent_names_copy;
3090 }
3091 }
3092
3093 /* avoid unnecessary string look-ups of clk_core's possible parents. */
3094 core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
3095 GFP_KERNEL);
3096 if (!core->parents) {
3097 ret = -ENOMEM;
3098 goto fail_parents;
3099	}
3100
3101 INIT_HLIST_HEAD(&core->clks);
3102
3103 hw->clk = __clk_create_clk(hw, NULL, NULL);
3104 if (IS_ERR(hw->clk)) {
3105 ret = PTR_ERR(hw->clk);
3106 goto fail_parents;
3107 }
3108
3109 ret = __clk_core_init(core);
3110 if (!ret)
3111 return hw->clk;
3112
3113 __clk_free_clk(hw->clk);
3114 hw->clk = NULL;
3115
3116fail_parents:
3117 kfree(core->parents);
3118fail_parent_names_copy:
3119 while (--i >= 0)
3120 kfree_const(core->parent_names[i]);
3121 kfree(core->parent_names);
3122fail_parent_names:
3123fail_ops:
3124 kfree_const(core->name);
3125fail_name:
3126 kfree(core);
3127fail_out:
3128 return ERR_PTR(ret);
3129}
3130EXPORT_SYMBOL_GPL(clk_register);
3131
3132/**
3133 * clk_hw_register - register a clk_hw and return an error code
3134 * @dev: device that is registering this clock
3135 * @hw: link to hardware-specific clock data
3136 *
3137 * clk_hw_register is the primary interface for populating the clock tree with
3138 * new clock nodes. It returns an integer equal to zero indicating success or
3139 * less than zero indicating failure. Drivers must test for an error code after
3140 * calling clk_hw_register().
3141 */
3142int clk_hw_register(struct device *dev, struct clk_hw *hw)
3143{
3144 return PTR_ERR_OR_ZERO(clk_register(dev, hw));
3145}
3146EXPORT_SYMBOL_GPL(clk_hw_register);
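/*
 * Illustrative sketch (not part of the original file; the foo_* names and
 * foo_clk_ops are hypothetical, driver-provided): a minimal
 * clk_hw_register() call, filling in the hw->init fields consumed by
 * clk_register() above:
 *
 *	static const char * const foo_parents[] = { "osc24m" };
 *
 *	static const struct clk_init_data foo_init = {
 *		.name = "foo",
 *		.ops = &foo_clk_ops,
 *		.parent_names = foo_parents,
 *		.num_parents = ARRAY_SIZE(foo_parents),
 *	};
 *
 *	foo->hw.init = &foo_init;
 *	ret = clk_hw_register(dev, &foo->hw);
 *	if (ret)
 *		return ret;
 */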
3147
3148/* Free memory allocated for a clock. */
3149static void __clk_release(struct kref *ref)
3150{
3151 struct clk_core *core = container_of(ref, struct clk_core, ref);
3152 int i = core->num_parents;
3153
3154 lockdep_assert_held(&prepare_lock);
3155
3156 kfree(core->parents);
3157 while (--i >= 0)
3158 kfree_const(core->parent_names[i]);
3159
3160 kfree(core->parent_names);
3161 kfree_const(core->name);
3162 kfree(core);
3163}
3164
3165/*
3166 * Empty clk_ops for unregistered clocks. These are used temporarily
3167 * after clk_unregister() was called on a clock and until the last clock
3168 * consumer calls clk_put() and the struct clk object is freed.
3169 */
3170static int clk_nodrv_prepare_enable(struct clk_hw *hw)
3171{
3172 return -ENXIO;
3173}
3174
3175static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
3176{
3177 WARN_ON_ONCE(1);
3178}
3179
3180static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
3181 unsigned long parent_rate)
3182{
3183 return -ENXIO;
3184}
3185
3186static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
3187{
3188 return -ENXIO;
3189}
3190
3191static const struct clk_ops clk_nodrv_ops = {
3192 .enable = clk_nodrv_prepare_enable,
3193 .disable = clk_nodrv_disable_unprepare,
3194 .prepare = clk_nodrv_prepare_enable,
3195 .unprepare = clk_nodrv_disable_unprepare,
3196 .set_rate = clk_nodrv_set_rate,
3197 .set_parent = clk_nodrv_set_parent,
3198};
3199
3200/**
3201 * clk_unregister - unregister a currently registered clock
3202 * @clk: clock to unregister
3203 */
3204void clk_unregister(struct clk *clk)
3205{
3206 unsigned long flags;
3207
3208 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
3209 return;
3210
3211 clk_debug_unregister(clk->core);
3212
3213 clk_prepare_lock();
3214
3215 if (clk->core->ops == &clk_nodrv_ops) {
3216 pr_err("%s: unregistered clock: %s\n", __func__,
3217 clk->core->name);
3218 goto unlock;
3219 }
3220 /*
3221 * Assign empty clock ops for consumers that might still hold
3222 * a reference to this clock.
3223 */
3224 flags = clk_enable_lock();
3225 clk->core->ops = &clk_nodrv_ops;
3226 clk_enable_unlock(flags);
3227
3228 if (!hlist_empty(&clk->core->children)) {
3229 struct clk_core *child;
3230 struct hlist_node *t;
3231
3232 /* Reparent all children to the orphan list. */
3233 hlist_for_each_entry_safe(child, t, &clk->core->children,
3234 child_node)
3235 clk_core_set_parent_nolock(child, NULL);
3236 }
3237
3238 hlist_del_init(&clk->core->child_node);
3239
3240 if (clk->core->prepare_count)
3241 pr_warn("%s: unregistering prepared clock: %s\n",
3242 __func__, clk->core->name);
3243
3244 if (clk->core->protect_count)
3245 pr_warn("%s: unregistering protected clock: %s\n",
3246 __func__, clk->core->name);
3247
3248 kref_put(&clk->core->ref, __clk_release);
3249unlock:
3250 clk_prepare_unlock();
3251}
3252EXPORT_SYMBOL_GPL(clk_unregister);
3253
3254/**
3255 * clk_hw_unregister - unregister a currently registered clk_hw
3256 * @hw: hardware-specific clock data to unregister
3257 */
3258void clk_hw_unregister(struct clk_hw *hw)
3259{
3260 clk_unregister(hw->clk);
3261}
3262EXPORT_SYMBOL_GPL(clk_hw_unregister);
3263
3264static void devm_clk_release(struct device *dev, void *res)
3265{
3266 clk_unregister(*(struct clk **)res);
3267}
3268
3269static void devm_clk_hw_release(struct device *dev, void *res)
3270{
3271 clk_hw_unregister(*(struct clk_hw **)res);
3272}
3273
3274/**
3275 * devm_clk_register - resource managed clk_register()
3276 * @dev: device that is registering this clock
3277 * @hw: link to hardware-specific clock data
3278 *
3279 * Managed clk_register(). Clocks returned from this function are
3280 * automatically clk_unregister()ed on driver detach. See clk_register() for
3281 * more information.
3282 */
3283struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
3284{
3285 struct clk *clk;
3286 struct clk **clkp;
3287
3288 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
3289 if (!clkp)
3290 return ERR_PTR(-ENOMEM);
3291
3292 clk = clk_register(dev, hw);
3293 if (!IS_ERR(clk)) {
3294 *clkp = clk;
3295 devres_add(dev, clkp);
3296 } else {
3297 devres_free(clkp);
3298 }
3299
3300 return clk;
3301}
3302EXPORT_SYMBOL_GPL(devm_clk_register);
3303
3304/**
3305 * devm_clk_hw_register - resource managed clk_hw_register()
3306 * @dev: device that is registering this clock
3307 * @hw: link to hardware-specific clock data
3308 *
3309 * Managed clk_hw_register(). Clocks registered by this function are
3310 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
3311 * for more information.
3312 */
3313int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
3314{
3315 struct clk_hw **hwp;
3316 int ret;
3317
3318 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
3319 if (!hwp)
3320 return -ENOMEM;
3321
3322 ret = clk_hw_register(dev, hw);
3323 if (!ret) {
3324 *hwp = hw;
3325 devres_add(dev, hwp);
3326 } else {
3327 devres_free(hwp);
3328 }
3329
3330 return ret;
3331}
3332EXPORT_SYMBOL_GPL(devm_clk_hw_register);
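/*
 * Illustrative sketch (not part of the original file; reuses the
 * hypothetical foo clock from the clk_hw_register() sketch above): the
 * devm_ variant ties the registration's lifetime to the device, so no
 * explicit unregister is needed on the error or remove paths:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		foo->hw.init = &foo_init;
 *		ret = devm_clk_hw_register(&pdev->dev, &foo->hw);
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 */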
3333
3334static int devm_clk_match(struct device *dev, void *res, void *data)
3335{
3336 struct clk *c = res;
3337 if (WARN_ON(!c))
3338 return 0;
3339 return c == data;
3340}
3341
3342static int devm_clk_hw_match(struct device *dev, void *res, void *data)
3343{
3344 struct clk_hw *hw = res;
3345
3346 if (WARN_ON(!hw))
3347 return 0;
3348 return hw == data;
3349}
3350
3351/**
3352 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device the clock was devm_clk_register()ed with
3353 * @clk: clock to unregister
3354 *
3355 * Deallocate a clock allocated with devm_clk_register(). Normally
3356 * this function will not need to be called and the resource management
3357 * code will ensure that the resource is freed.
3358 */
3359void devm_clk_unregister(struct device *dev, struct clk *clk)
3360{
3361 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
3362}
3363EXPORT_SYMBOL_GPL(devm_clk_unregister);
3364
3365/**
3366 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
3367 * @dev: device that is unregistering the hardware-specific clock data
3368 * @hw: link to hardware-specific clock data
3369 *
3370 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
3371 * this function will not need to be called and the resource management
3372 * code will ensure that the resource is freed.
3373 */
3374void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
3375{
3376 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
3377 hw));
3378}
3379EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
3380
3381/*
3382 * clkdev helpers
3383 */
3384int __clk_get(struct clk *clk)
3385{
3386 struct clk_core *core = !clk ? NULL : clk->core;
3387
3388 if (core) {
3389 if (!try_module_get(core->owner))
3390 return 0;
3391
3392 kref_get(&core->ref);
3393 }
3394 return 1;
3395}
3396
3397void __clk_put(struct clk *clk)
3398{
3399 struct module *owner;
3400
3401 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
3402 return;
3403
3404 clk_prepare_lock();
3405
3406 /*
3407 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
3408	 * given user should have been balanced with calls to
3409	 * clk_rate_exclusive_put() by that same consumer.
3410 */
3411 if (WARN_ON(clk->exclusive_count)) {
3412 /* We voiced our concern, let's sanitize the situation */
3413 clk->core->protect_count -= (clk->exclusive_count - 1);
3414 clk_core_rate_unprotect(clk->core);
3415 clk->exclusive_count = 0;
3416 }
3417
3418 hlist_del(&clk->clks_node);
3419 if (clk->min_rate > clk->core->req_rate ||
3420 clk->max_rate < clk->core->req_rate)
3421 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
3422
3423 owner = clk->core->owner;
3424 kref_put(&clk->core->ref, __clk_release);
3425
3426 clk_prepare_unlock();
3427
3428 module_put(owner);
3429
3430 kfree(clk);
3431}
3432
3433/*** clk rate change notifiers ***/
3434
3435/**
3436 * clk_notifier_register - add a clk rate change notifier
3437 * @clk: struct clk * to watch
3438 * @nb: struct notifier_block * with callback info
3439 *
3440 * Request notification when clk's rate changes. This uses an SRCU
3441 * notifier because we want it to block and notifier unregistrations are
3442 * uncommon. The callbacks associated with the notifier must not
3443 * re-enter the clk framework by calling any top-level clk APIs;
3444 * doing so would recursively take the prepare_lock mutex and deadlock.
3445 *
3446 * In all notification cases (pre, post and abort rate change) the original
3447 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
3448 * and the new frequency is passed via struct clk_notifier_data.new_rate.
3449 *
3450 * clk_notifier_register() must be called from non-atomic context.
3451 * Returns -EINVAL if called with null arguments, -ENOMEM upon
3452 * allocation failure; otherwise, passes along the return value of
3453 * srcu_notifier_chain_register().
3454 */
3455int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
3456{
3457 struct clk_notifier *cn;
3458 int ret = -ENOMEM;
3459
3460 if (!clk || !nb)
3461 return -EINVAL;
3462
3463 clk_prepare_lock();
3464
3465 /* search the list of notifiers for this clk */
3466 list_for_each_entry(cn, &clk_notifier_list, node)
3467 if (cn->clk == clk)
3468 break;
3469
3470 /* if clk wasn't in the notifier list, allocate new clk_notifier */
3471 if (cn->clk != clk) {
3472 cn = kzalloc(sizeof(*cn), GFP_KERNEL);
3473 if (!cn)
3474 goto out;
3475
3476 cn->clk = clk;
3477 srcu_init_notifier_head(&cn->notifier_head);
3478
3479 list_add(&cn->node, &clk_notifier_list);
3480 }
3481
3482 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
3483
3484 clk->core->notifier_count++;
3485
3486out:
3487 clk_prepare_unlock();
3488
3489 return ret;
3490}
3491EXPORT_SYMBOL_GPL(clk_notifier_register);
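/*
 * Illustrative sketch (not part of the original file; the foo_* names are
 * hypothetical): a consumer watching for rate changes. The events delivered
 * are the standard PRE_RATE_CHANGE, POST_RATE_CHANGE and ABORT_RATE_CHANGE
 * notifications:
 *
 *	static int foo_rate_notify(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == POST_RATE_CHANGE)
 *			pr_debug("rate changed: %lu -> %lu\n",
 *				 ndata->old_rate, ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_rate_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */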
3492
3493/**
3494 * clk_notifier_unregister - remove a clk rate change notifier
3495 * @clk: struct clk *
3496 * @nb: struct notifier_block * with callback info
3497 *
3498 * Requests no further notification for changes to 'clk' and frees the memory
3499 * allocated in clk_notifier_register.
3500 *
3501 * Returns -EINVAL if called with null arguments; otherwise, passes
3502 * along the return value of srcu_notifier_chain_unregister().
3503 */
3504int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
3505{
3506 struct clk_notifier *cn = NULL;
3507 int ret = -EINVAL;
3508
3509 if (!clk || !nb)
3510 return -EINVAL;
3511
3512 clk_prepare_lock();
3513
3514 list_for_each_entry(cn, &clk_notifier_list, node)
3515 if (cn->clk == clk)
3516 break;
3517
3518 if (cn->clk == clk) {
3519 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
3520
3521 clk->core->notifier_count--;
3522
3523 /* XXX the notifier code should handle this better */
3524 if (!cn->notifier_head.head) {
3525 srcu_cleanup_notifier_head(&cn->notifier_head);
3526 list_del(&cn->node);
3527 kfree(cn);
3528 }
3529
3530 } else {
3531 ret = -ENOENT;
3532 }
3533
3534 clk_prepare_unlock();
3535
3536 return ret;
3537}
3538EXPORT_SYMBOL_GPL(clk_notifier_unregister);
3539
3540#ifdef CONFIG_OF
3541/**
3542 * struct of_clk_provider - Clock provider registration structure
3543 * @link: Entry in global list of clock providers
3544 * @node: Pointer to device tree node of clock provider
3545 * @get: Get clock callback. Returns NULL or a struct clk for the
3546 * given clock specifier
 * @get_hw: Get clk_hw callback. Returns NULL, an ERR_PTR or a
 *          struct clk_hw for the given clock specifier
3547 * @data: context pointer to be passed into @get callback
3548 */
3549struct of_clk_provider {
3550 struct list_head link;
3551
3552 struct device_node *node;
3553 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
3554 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
3555 void *data;
3556};
3557
3558static const struct of_device_id __clk_of_table_sentinel
3559 __used __section(__clk_of_table_end);
3560
3561static LIST_HEAD(of_clk_providers);
3562static DEFINE_MUTEX(of_clk_mutex);
3563
3564struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
3565 void *data)
3566{
3567 return data;
3568}
3569EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
3570
3571struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
3572{
3573 return data;
3574}
3575EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
3576
3577struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
3578{
3579 struct clk_onecell_data *clk_data = data;
3580 unsigned int idx = clkspec->args[0];
3581
3582 if (idx >= clk_data->clk_num) {
3583 pr_err("%s: invalid clock index %u\n", __func__, idx);
3584 return ERR_PTR(-EINVAL);
3585 }
3586
3587 return clk_data->clks[idx];
3588}
3589EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
3590
3591struct clk_hw *
3592of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
3593{
3594 struct clk_hw_onecell_data *hw_data = data;
3595 unsigned int idx = clkspec->args[0];
3596
3597 if (idx >= hw_data->num) {
3598 pr_err("%s: invalid index %u\n", __func__, idx);
3599 return ERR_PTR(-EINVAL);
3600 }
3601
3602 return hw_data->hws[idx];
3603}
3604EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
3605
3606/**
3607 * of_clk_add_provider() - Register a clock provider for a node
3608 * @np: Device node pointer associated with clock provider
3609 * @clk_src_get: callback for decoding clock
3610 * @data: context pointer for @clk_src_get callback.
3611 */
3612int of_clk_add_provider(struct device_node *np,
3613 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
3614 void *data),
3615 void *data)
3616{
3617 struct of_clk_provider *cp;
3618 int ret;
3619
3620 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
3621 if (!cp)
3622 return -ENOMEM;
3623
3624 cp->node = of_node_get(np);
3625 cp->data = data;
3626 cp->get = clk_src_get;
3627
3628 mutex_lock(&of_clk_mutex);
3629 list_add(&cp->link, &of_clk_providers);
3630 mutex_unlock(&of_clk_mutex);
3631 pr_debug("Added clock from %pOF\n", np);
3632
3633 ret = of_clk_set_defaults(np, true);
3634 if (ret < 0)
3635 of_clk_del_provider(np);
3636
3637 return ret;
3638}
3639EXPORT_SYMBOL_GPL(of_clk_add_provider);
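/*
 * Illustrative sketch (not part of the original file; the foo_* names are
 * hypothetical): a provider with a single output can hand its sole struct
 * clk straight to the framework using of_clk_src_simple_get() defined above:
 *
 *	clk = clk_register(dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	return of_clk_add_provider(np, of_clk_src_simple_get, clk);
 */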
3640
3641/**
3642 * of_clk_add_hw_provider() - Register a clock provider for a node
3643 * @np: Device node pointer associated with clock provider
3644 * @get: callback for decoding clk_hw
3645 * @data: context pointer for @get callback.
3646 */
3647int of_clk_add_hw_provider(struct device_node *np,
3648 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
3649 void *data),
3650 void *data)
3651{
3652 struct of_clk_provider *cp;
3653 int ret;
3654
3655 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
3656 if (!cp)
3657 return -ENOMEM;
3658
3659 cp->node = of_node_get(np);
3660 cp->data = data;
3661 cp->get_hw = get;
3662
3663 mutex_lock(&of_clk_mutex);
3664 list_add(&cp->link, &of_clk_providers);
3665 mutex_unlock(&of_clk_mutex);
3666 pr_debug("Added clk_hw provider from %pOF\n", np);
3667
3668 ret = of_clk_set_defaults(np, true);
3669 if (ret < 0)
3670 of_clk_del_provider(np);
3671
3672 return ret;
3673}
3674EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
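/*
 * Illustrative sketch (not part of the original file; the FOO_* and foo_*
 * names are hypothetical, and each clk_hw is assumed to have been registered
 * beforehand): a provider with several outputs typically pairs this with
 * of_clk_hw_onecell_get() and a struct clk_hw_onecell_data indexed by the
 * clock specifier:
 *
 *	struct clk_hw_onecell_data *hw_data;
 *
 *	hw_data = kzalloc(sizeof(*hw_data) +
 *			  FOO_NR_CLKS * sizeof(hw_data->hws[0]), GFP_KERNEL);
 *	if (!hw_data)
 *		return -ENOMEM;
 *	hw_data->num = FOO_NR_CLKS;
 *	hw_data->hws[FOO_CLK_BUS] = &foo_bus.hw;
 *	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
 */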
3675
3676static void devm_of_clk_release_provider(struct device *dev, void *res)
3677{
3678 of_clk_del_provider(*(struct device_node **)res);
3679}
3680
3681int devm_of_clk_add_hw_provider(struct device *dev,
3682 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
3683 void *data),
3684 void *data)
3685{
3686 struct device_node **ptr, *np;
3687 int ret;
3688
3689 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
3690 GFP_KERNEL);
3691 if (!ptr)
3692 return -ENOMEM;
3693
3694 np = dev->of_node;
3695 ret = of_clk_add_hw_provider(np, get, data);
3696 if (!ret) {
3697 *ptr = np;
3698 devres_add(dev, ptr);
3699 } else {
3700 devres_free(ptr);
3701 }
3702
3703 return ret;
3704}
3705EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
3706
3707/**
3708 * of_clk_del_provider() - Remove a previously registered clock provider
3709 * @np: Device node pointer associated with clock provider
3710 */
3711void of_clk_del_provider(struct device_node *np)
3712{
3713 struct of_clk_provider *cp;
3714
3715 mutex_lock(&of_clk_mutex);
3716 list_for_each_entry(cp, &of_clk_providers, link) {
3717 if (cp->node == np) {
3718 list_del(&cp->link);
3719 of_node_put(cp->node);
3720 kfree(cp);
3721 break;
3722 }
3723 }
3724 mutex_unlock(&of_clk_mutex);
3725}
3726EXPORT_SYMBOL_GPL(of_clk_del_provider);
3727
3728static int devm_clk_provider_match(struct device *dev, void *res, void *data)
3729{
3730 struct device_node **np = res;
3731
3732 if (WARN_ON(!np || !*np))
3733 return 0;
3734
3735 return *np == data;
3736}
3737
3738void devm_of_clk_del_provider(struct device *dev)
3739{
3740 int ret;
3741
3742 ret = devres_release(dev, devm_of_clk_release_provider,
3743 devm_clk_provider_match, dev->of_node);
3744
3745 WARN_ON(ret);
3746}
3747EXPORT_SYMBOL(devm_of_clk_del_provider);
3748
3749static struct clk_hw *
3750__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
3751 struct of_phandle_args *clkspec)
3752{
3753 struct clk *clk;
3754
3755 if (provider->get_hw)
3756 return provider->get_hw(clkspec, provider->data);
3757
3758 clk = provider->get(clkspec, provider->data);
3759 if (IS_ERR(clk))
3760 return ERR_CAST(clk);
3761 return __clk_get_hw(clk);
3762}
3763
3764struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
3765 const char *dev_id, const char *con_id)
3766{
3767 struct of_clk_provider *provider;
3768 struct clk *clk = ERR_PTR(-EPROBE_DEFER);
3769 struct clk_hw *hw;
3770
3771 if (!clkspec)
3772 return ERR_PTR(-EINVAL);
3773
3774	/* Check if we have such a provider in our list */
3775 mutex_lock(&of_clk_mutex);
3776 list_for_each_entry(provider, &of_clk_providers, link) {
3777 if (provider->node == clkspec->np) {
3778 hw = __of_clk_get_hw_from_provider(provider, clkspec);
3779 clk = __clk_create_clk(hw, dev_id, con_id);
3780 }
3781
3782 if (!IS_ERR(clk)) {
3783 if (!__clk_get(clk)) {
3784 __clk_free_clk(clk);
3785 clk = ERR_PTR(-ENOENT);
3786 }
3787
3788 break;
3789 }
3790 }
3791 mutex_unlock(&of_clk_mutex);
3792
3793 return clk;
3794}
3795
3796/**
3797 * of_clk_get_from_provider() - Lookup a clock from a clock provider
3798 * @clkspec: pointer to a clock specifier data structure
3799 *
3800 * This function looks up a struct clk from the registered list of clock
3801 * providers; its input is a clock specifier data structure as returned
3802 * from the of_parse_phandle_with_args() function call.
3803 */
3804struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
3805{
3806 return __of_clk_get_from_provider(clkspec, NULL, __func__);
3807}
3808EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
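/*
 * Illustrative sketch (not part of the original file): resolving the first
 * clock referenced by a node's "clocks" property by hand, roughly what
 * of_clk_get() does internally:
 *
 *	struct of_phandle_args clkspec;
 *	struct clk *clk;
 *
 *	if (of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
 *				       &clkspec))
 *		return ERR_PTR(-ENOENT);
 *	clk = of_clk_get_from_provider(&clkspec);
 *	of_node_put(clkspec.np);
 */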
3809
3810/**
3811 * of_clk_get_parent_count() - Count the number of clocks a device node has
3812 * @np: device node to count
3813 *
3814 * Returns: The number of clocks that are possible parents of this node
3815 */
3816unsigned int of_clk_get_parent_count(struct device_node *np)
3817{
3818 int count;
3819
3820 count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
3821 if (count < 0)
3822 return 0;
3823
3824 return count;
3825}
3826EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
3827
3828const char *of_clk_get_parent_name(struct device_node *np, int index)
3829{
3830 struct of_phandle_args clkspec;
3831 struct property *prop;
3832 const char *clk_name;
3833 const __be32 *vp;
3834 u32 pv;
3835 int rc;
3836 int count;
3837 struct clk *clk;
3838
3839 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
3840 &clkspec);
3841 if (rc)
3842 return NULL;
3843
3844 index = clkspec.args_count ? clkspec.args[0] : 0;
3845 count = 0;
3846
3847 /* if there is an indices property, use it to transfer the index
3848 * specified into an array offset for the clock-output-names property.
3849 */
3850 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
3851 if (index == pv) {
3852 index = count;
3853 break;
3854 }
3855 count++;
3856 }
3857 /* We went off the end of 'clock-indices' without finding it */
3858 if (prop && !vp)
3859 return NULL;
3860
3861 if (of_property_read_string_index(clkspec.np, "clock-output-names",
3862 index,
3863 &clk_name) < 0) {
3864 /*
3865 * Best effort to get the name if the clock has been
3866 * registered with the framework. If the clock isn't
3867 * registered, we return the node name as the name of
3868 * the clock as long as #clock-cells = 0.
3869 */
3870 clk = of_clk_get_from_provider(&clkspec);
3871 if (IS_ERR(clk)) {
3872 if (clkspec.args_count == 0)
3873 clk_name = clkspec.np->name;
3874 else
3875 clk_name = NULL;
3876 } else {
3877 clk_name = __clk_get_name(clk);
3878 clk_put(clk);
3879 }
3880 }
3881
3883 of_node_put(clkspec.np);
3884 return clk_name;
3885}
3886EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
3887
3888/**
3889 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
3890 * number of parents
3891 * @np: Device node pointer associated with clock provider
3892 * @parents: pointer to the char array that holds the parents' names
3893 * @size: size of the @parents array
3894 *
3895 * Return: number of parents for the clock node.
3896 */
3897int of_clk_parent_fill(struct device_node *np, const char **parents,
3898 unsigned int size)
3899{
3900 unsigned int i = 0;
3901
3902 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
3903 i++;
3904
3905 return i;
3906}
3907EXPORT_SYMBOL_GPL(of_clk_parent_fill);
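/*
 * Illustrative sketch (not part of the original file): typical use when
 * building the parent_names array for a clk_init_data from the DT "clocks"
 * property:
 *
 *	const char *parents[2];
 *	int num_parents;
 *
 *	num_parents = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));
 */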
3908
3909struct clock_provider {
3910 of_clk_init_cb_t clk_init_cb;
3911 struct device_node *np;
3912 struct list_head node;
3913};
3914
3915/*
3916 * This function looks for a parent clock. If there is one, then it
3917 * checks whether the provider for that parent clock has been initialized,
3918 * in which case the parent clock is ready.
3919 */
3920static int parent_ready(struct device_node *np)
3921{
3922 int i = 0;
3923
3924 while (true) {
3925 struct clk *clk = of_clk_get(np, i);
3926
3927		/* this parent is ready, we can check the next one */
3928 if (!IS_ERR(clk)) {
3929 clk_put(clk);
3930 i++;
3931 continue;
3932 }
3933
3934 /* at least one parent is not ready, we exit now */
3935 if (PTR_ERR(clk) == -EPROBE_DEFER)
3936 return 0;
3937
3938 /*
3939		 * Here we assume that the device tree is written
3940		 * correctly, so an error means that there are no
3941		 * more parents. As we didn't exit yet, the previous
3942		 * parents are ready. If a clock has no parent at
3943		 * all, there is nothing to wait for, so we consider
3944		 * it ready as well.
3945 */
3946 return 1;
3947 }
3948}
3949
3950/**
3951 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
3952 * @np: Device node pointer associated with clock provider
3953 * @index: clock index
3954 * @flags: pointer to top-level framework flags
3955 *
3956 * Detects if the clock-critical property exists and, if so, sets the
3957 * corresponding CLK_IS_CRITICAL flag.
3958 *
3959 * Do not use this function. It exists only for legacy Device Tree
3960 * bindings, such as the outdated one-clock-per-node style.
3961 * Those bindings typically put all clock data into .dts and the Linux
3962 * driver has no clock data, thus making it impossible to set this flag
3963 * correctly from the driver. Only those drivers may call
3964 * of_clk_detect_critical from their setup functions.
3965 *
3966 * Return: error code or zero on success
3967 */
3968int of_clk_detect_critical(struct device_node *np,
3969 int index, unsigned long *flags)
3970{
3971 struct property *prop;
3972 const __be32 *cur;
3973 uint32_t idx;
3974
3975 if (!np || !flags)
3976 return -EINVAL;
3977
3978 of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
3979 if (index == idx)
3980 *flags |= CLK_IS_CRITICAL;
3981
3982 return 0;
3983}
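/*
 * Illustrative sketch (not part of the original file; init is a hypothetical
 * clk_init_data): a legacy one-clock-per-node setup function would merge the
 * detected flag into the flags it registers with:
 *
 *	unsigned long flags = 0;
 *
 *	of_clk_detect_critical(np, 0, &flags);
 *	init.flags |= flags;
 */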
3984
3985/**
3986 * of_clk_init() - Scan and init clock providers from the DT
3987 * @matches: array of compatible values and init functions for providers.
3988 *
3989 * This function scans the device tree for matching clock providers
3990 * and calls their initialization functions, trying to initialize each
3991 * provider only after its parent clock providers are ready.
3992 */
3993void __init of_clk_init(const struct of_device_id *matches)
3994{
3995 const struct of_device_id *match;
3996 struct device_node *np;
3997 struct clock_provider *clk_provider, *next;
3998 bool is_init_done;
3999 bool force = false;
4000 LIST_HEAD(clk_provider_list);
4001
4002 if (!matches)
4003 matches = &__clk_of_table;
4004
4005	/* First prepare the list of clock providers */
4006 for_each_matching_node_and_match(np, matches, &match) {
4007 struct clock_provider *parent;
4008
4009 if (!of_device_is_available(np))
4010 continue;
4011
4012 parent = kzalloc(sizeof(*parent), GFP_KERNEL);
4013 if (!parent) {
4014 list_for_each_entry_safe(clk_provider, next,
4015 &clk_provider_list, node) {
4016 list_del(&clk_provider->node);
4017 of_node_put(clk_provider->np);
4018 kfree(clk_provider);
4019 }
4020 of_node_put(np);
4021 return;
4022 }
4023
4024 parent->clk_init_cb = match->data;
4025 parent->np = of_node_get(np);
4026 list_add_tail(&parent->node, &clk_provider_list);
4027 }
4028
4029 while (!list_empty(&clk_provider_list)) {
4030 is_init_done = false;
4031 list_for_each_entry_safe(clk_provider, next,
4032 &clk_provider_list, node) {
4033 if (force || parent_ready(clk_provider->np)) {
4034
4035 /* Don't populate platform devices */
4036 of_node_set_flag(clk_provider->np,
4037 OF_POPULATED);
4038
4039 clk_provider->clk_init_cb(clk_provider->np);
4040 of_clk_set_defaults(clk_provider->np, true);
4041
4042 list_del(&clk_provider->node);
4043 of_node_put(clk_provider->np);
4044 kfree(clk_provider);
4045 is_init_done = true;
4046 }
4047 }
4048
4049 /*
4050		 * We didn't manage to initialize any of the
4051		 * remaining providers during the last loop, so now we
4052		 * initialize all the remaining ones unconditionally,
4053		 * in case a clock parent turns out not to be mandatory.
4054 */
4055 if (!is_init_done)
4056 force = true;
4057 }
4058}
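/*
 * Illustrative sketch (not part of the original file; the foo names are
 * hypothetical): early clock providers enter the __clk_of_table scanned
 * above via CLK_OF_DECLARE():
 *
 *	static void __init foo_clk_setup(struct device_node *np)
 *	{
 *		... register clks, then of_clk_add_provider(np, ...); ...
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", foo_clk_setup);
 */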
4059#endif