v6.8
  1/*
  2 * SuperH clock framework
  3 *
  4 *  Copyright (C) 2005 - 2010  Paul Mundt
  5 *
  6 * This clock framework is derived from the OMAP version by:
  7 *
  8 *	Copyright (C) 2004 - 2008 Nokia Corporation
  9 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 10 *
 11 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 12 *
 13 * This file is subject to the terms and conditions of the GNU General Public
 14 * License.  See the file "COPYING" in the main directory of this archive
 15 * for more details.
 16 */
 17#define pr_fmt(fmt) "clock: " fmt
 18
 19#include <linux/kernel.h>
 20#include <linux/init.h>
 21#include <linux/module.h>
 22#include <linux/mutex.h>
 23#include <linux/list.h>
 24#include <linux/syscore_ops.h>
 25#include <linux/seq_file.h>
 26#include <linux/err.h>
 27#include <linux/io.h>
 28#include <linux/cpufreq.h>
 29#include <linux/clk.h>
 30#include <linux/sh_clk.h>
 31
 32static LIST_HEAD(clock_list);
 33static DEFINE_SPINLOCK(clock_lock);
 34static DEFINE_MUTEX(clock_list_sem);
 35
 36/* clock disable operations are not passed on to hardware during boot */
 37static int allow_disable;
 38
 39void clk_rate_table_build(struct clk *clk,
 40			  struct cpufreq_frequency_table *freq_table,
 41			  int nr_freqs,
 42			  struct clk_div_mult_table *src_table,
 43			  unsigned long *bitmap)
 44{
 45	unsigned long mult, div;
 46	unsigned long freq;
 47	int i;
 48
 49	clk->nr_freqs = nr_freqs;
 50
 51	for (i = 0; i < nr_freqs; i++) {
 52		div = 1;
 53		mult = 1;
 54
 55		if (src_table->divisors && i < src_table->nr_divisors)
 56			div = src_table->divisors[i];
 57
 58		if (src_table->multipliers && i < src_table->nr_multipliers)
 59			mult = src_table->multipliers[i];
 60
 61		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
 62			freq = CPUFREQ_ENTRY_INVALID;
 63		else
 64			freq = clk->parent->rate * mult / div;
 65
 66		freq_table[i].driver_data = i;
 67		freq_table[i].frequency = freq;
 68	}
 69
 70	/* Termination entry */
 71	freq_table[i].driver_data = i;
 72	freq_table[i].frequency = CPUFREQ_TABLE_END;
 73}
 74
 75struct clk_rate_round_data;
 76
 77struct clk_rate_round_data {
 78	unsigned long rate;
 79	unsigned int min, max;
 80	long (*func)(unsigned int, struct clk_rate_round_data *);
 81	void *arg;
 82};
 83
 84#define for_each_frequency(pos, r, freq)			\
 85	for (pos = r->min, freq = r->func(pos, r);		\
 86	     pos <= r->max; pos++, freq = r->func(pos, r))	\
 87		if (unlikely(freq == 0))			\
 88			;					\
 89		else
 90
 91static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
 92{
 93	unsigned long rate_error, rate_error_prev = ~0UL;
 94	unsigned long highest, lowest, freq;
 95	long rate_best_fit = -ENOENT;
 96	int i;
 97
 98	highest = 0;
 99	lowest = ~0UL;
100
101	for_each_frequency(i, rounder, freq) {
102		if (freq > highest)
103			highest = freq;
104		if (freq < lowest)
105			lowest = freq;
106
107		rate_error = abs(freq - rounder->rate);
108		if (rate_error < rate_error_prev) {
109			rate_best_fit = freq;
110			rate_error_prev = rate_error;
111		}
112
113		if (rate_error == 0)
114			break;
115	}
116
117	if (rounder->rate >= highest)
118		rate_best_fit = highest;
119	if (rounder->rate <= lowest)
120		rate_best_fit = lowest;
121
122	return rate_best_fit;
123}
124
125static long clk_rate_table_iter(unsigned int pos,
126				struct clk_rate_round_data *rounder)
127{
128	struct cpufreq_frequency_table *freq_table = rounder->arg;
129	unsigned long freq = freq_table[pos].frequency;
130
131	if (freq == CPUFREQ_ENTRY_INVALID)
132		freq = 0;
133
134	return freq;
135}
136
137long clk_rate_table_round(struct clk *clk,
138			  struct cpufreq_frequency_table *freq_table,
139			  unsigned long rate)
140{
141	struct clk_rate_round_data table_round = {
142		.min	= 0,
143		.max	= clk->nr_freqs - 1,
144		.func	= clk_rate_table_iter,
145		.arg	= freq_table,
146		.rate	= rate,
147	};
148
149	if (clk->nr_freqs < 1)
150		return -ENOSYS;
151
152	return clk_rate_round_helper(&table_round);
153}
154
155static long clk_rate_div_range_iter(unsigned int pos,
156				    struct clk_rate_round_data *rounder)
157{
158	return clk_get_rate(rounder->arg) / pos;
159}
160
161long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
162			      unsigned int div_max, unsigned long rate)
163{
164	struct clk_rate_round_data div_range_round = {
165		.min	= div_min,
166		.max	= div_max,
167		.func	= clk_rate_div_range_iter,
168		.arg	= clk_get_parent(clk),
169		.rate	= rate,
170	};
171
172	return clk_rate_round_helper(&div_range_round);
173}
174
175static long clk_rate_mult_range_iter(unsigned int pos,
176				      struct clk_rate_round_data *rounder)
177{
178	return clk_get_rate(rounder->arg) * pos;
179}
180
181long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
182			       unsigned int mult_max, unsigned long rate)
183{
184	struct clk_rate_round_data mult_range_round = {
185		.min	= mult_min,
186		.max	= mult_max,
187		.func	= clk_rate_mult_range_iter,
188		.arg	= clk_get_parent(clk),
189		.rate	= rate,
190	};
191
192	return clk_rate_round_helper(&mult_range_round);
193}
194
195int clk_rate_table_find(struct clk *clk,
196			struct cpufreq_frequency_table *freq_table,
197			unsigned long rate)
198{
199	struct cpufreq_frequency_table *pos;
 200	int idx;
201
202	cpufreq_for_each_valid_entry_idx(pos, freq_table, idx)
203		if (pos->frequency == rate)
 204			return idx;
205
206	return -ENOENT;
207}
208
209/* Used for clocks that always have same value as the parent clock */
210unsigned long followparent_recalc(struct clk *clk)
211{
212	return clk->parent ? clk->parent->rate : 0;
213}
214
215int clk_reparent(struct clk *child, struct clk *parent)
216{
217	list_del_init(&child->sibling);
218	if (parent)
219		list_add(&child->sibling, &parent->children);
220	child->parent = parent;
 221
222	return 0;
223}
224
225/* Propagate rate to children */
226void propagate_rate(struct clk *tclk)
227{
228	struct clk *clkp;
229
230	list_for_each_entry(clkp, &tclk->children, sibling) {
231		if (clkp->ops && clkp->ops->recalc)
232			clkp->rate = clkp->ops->recalc(clkp);
233
234		propagate_rate(clkp);
235	}
236}
237
238static void __clk_disable(struct clk *clk)
239{
240	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
241		 clk))
242		return;
243
244	if (!(--clk->usecount)) {
245		if (likely(allow_disable && clk->ops && clk->ops->disable))
246			clk->ops->disable(clk);
247		if (likely(clk->parent))
248			__clk_disable(clk->parent);
249	}
250}
251
252void clk_disable(struct clk *clk)
253{
254	unsigned long flags;
255
256	if (!clk)
257		return;
258
259	spin_lock_irqsave(&clock_lock, flags);
260	__clk_disable(clk);
261	spin_unlock_irqrestore(&clock_lock, flags);
262}
263EXPORT_SYMBOL_GPL(clk_disable);
264
265static int __clk_enable(struct clk *clk)
266{
267	int ret = 0;
268
269	if (clk->usecount++ == 0) {
270		if (clk->parent) {
271			ret = __clk_enable(clk->parent);
272			if (unlikely(ret))
273				goto err;
274		}
275
276		if (clk->ops && clk->ops->enable) {
277			ret = clk->ops->enable(clk);
278			if (ret) {
279				if (clk->parent)
280					__clk_disable(clk->parent);
281				goto err;
282			}
283		}
284	}
285
286	return ret;
287err:
288	clk->usecount--;
289	return ret;
290}
291
292int clk_enable(struct clk *clk)
293{
294	unsigned long flags;
295	int ret;
296
297	if (!clk)
298		return 0;
299
300	spin_lock_irqsave(&clock_lock, flags);
301	ret = __clk_enable(clk);
302	spin_unlock_irqrestore(&clock_lock, flags);
303
304	return ret;
305}
306EXPORT_SYMBOL_GPL(clk_enable);
307
308static LIST_HEAD(root_clks);
309
310/**
311 * recalculate_root_clocks - recalculate and propagate all root clocks
312 *
313 * Recalculates all root clocks (clocks with no parent), which if the
314 * clock's .recalc is set correctly, should also propagate their rates.
315 * Called at init.
316 */
317void recalculate_root_clocks(void)
318{
319	struct clk *clkp;
320
321	list_for_each_entry(clkp, &root_clks, sibling) {
322		if (clkp->ops && clkp->ops->recalc)
323			clkp->rate = clkp->ops->recalc(clkp);
324		propagate_rate(clkp);
325	}
326}
327
328static struct clk_mapping dummy_mapping;
329
330static struct clk *lookup_root_clock(struct clk *clk)
331{
332	while (clk->parent)
333		clk = clk->parent;
334
335	return clk;
336}
337
338static int clk_establish_mapping(struct clk *clk)
339{
340	struct clk_mapping *mapping = clk->mapping;
341
342	/*
343	 * Propagate mappings.
344	 */
345	if (!mapping) {
346		struct clk *clkp;
347
348		/*
349		 * dummy mapping for root clocks with no specified ranges
350		 */
351		if (!clk->parent) {
352			clk->mapping = &dummy_mapping;
353			goto out;
354		}
355
356		/*
357		 * If we're on a child clock and it provides no mapping of its
358		 * own, inherit the mapping from its root clock.
359		 */
360		clkp = lookup_root_clock(clk);
361		mapping = clkp->mapping;
362		BUG_ON(!mapping);
363	}
364
365	/*
366	 * Establish initial mapping.
367	 */
368	if (!mapping->base && mapping->phys) {
369		kref_init(&mapping->ref);
370
371		mapping->base = ioremap(mapping->phys, mapping->len);
372		if (unlikely(!mapping->base))
373			return -ENXIO;
374	} else if (mapping->base) {
375		/*
376		 * Bump the refcount for an existing mapping
377		 */
378		kref_get(&mapping->ref);
379	}
380
381	clk->mapping = mapping;
382out:
383	clk->mapped_reg = clk->mapping->base;
384	clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
385	return 0;
386}
387
388static void clk_destroy_mapping(struct kref *kref)
389{
390	struct clk_mapping *mapping;
391
392	mapping = container_of(kref, struct clk_mapping, ref);
393
394	iounmap(mapping->base);
395}
396
397static void clk_teardown_mapping(struct clk *clk)
398{
399	struct clk_mapping *mapping = clk->mapping;
400
401	/* Nothing to do */
402	if (mapping == &dummy_mapping)
403		goto out;
404
405	kref_put(&mapping->ref, clk_destroy_mapping);
406	clk->mapping = NULL;
407out:
408	clk->mapped_reg = NULL;
409}
410
411int clk_register(struct clk *clk)
412{
413	int ret;
414
415	if (IS_ERR_OR_NULL(clk))
416		return -EINVAL;
417
418	/*
419	 * trap out already registered clocks
420	 */
421	if (clk->node.next || clk->node.prev)
422		return 0;
423
424	mutex_lock(&clock_list_sem);
425
426	INIT_LIST_HEAD(&clk->children);
427	clk->usecount = 0;
428
429	ret = clk_establish_mapping(clk);
430	if (unlikely(ret))
431		goto out_unlock;
432
433	if (clk->parent)
434		list_add(&clk->sibling, &clk->parent->children);
435	else
436		list_add(&clk->sibling, &root_clks);
437
438	list_add(&clk->node, &clock_list);
439
440#ifdef CONFIG_SH_CLK_CPG_LEGACY
441	if (clk->ops && clk->ops->init)
442		clk->ops->init(clk);
443#endif
444
445out_unlock:
446	mutex_unlock(&clock_list_sem);
447
448	return ret;
449}
450EXPORT_SYMBOL_GPL(clk_register);
451
452void clk_unregister(struct clk *clk)
453{
454	mutex_lock(&clock_list_sem);
455	list_del(&clk->sibling);
456	list_del(&clk->node);
457	clk_teardown_mapping(clk);
458	mutex_unlock(&clock_list_sem);
459}
460EXPORT_SYMBOL_GPL(clk_unregister);
461
462void clk_enable_init_clocks(void)
463{
464	struct clk *clkp;
465
466	list_for_each_entry(clkp, &clock_list, node)
467		if (clkp->flags & CLK_ENABLE_ON_INIT)
468			clk_enable(clkp);
469}
470
471unsigned long clk_get_rate(struct clk *clk)
472{
473	if (!clk)
474		return 0;
475
476	return clk->rate;
477}
478EXPORT_SYMBOL_GPL(clk_get_rate);
479
480int clk_set_rate(struct clk *clk, unsigned long rate)
481{
482	int ret = -EOPNOTSUPP;
483	unsigned long flags;
484
485	if (!clk)
486		return 0;
487
488	spin_lock_irqsave(&clock_lock, flags);
489
490	if (likely(clk->ops && clk->ops->set_rate)) {
491		ret = clk->ops->set_rate(clk, rate);
492		if (ret != 0)
493			goto out_unlock;
494	} else {
495		clk->rate = rate;
496		ret = 0;
497	}
498
499	if (clk->ops && clk->ops->recalc)
500		clk->rate = clk->ops->recalc(clk);
501
502	propagate_rate(clk);
503
504out_unlock:
505	spin_unlock_irqrestore(&clock_lock, flags);
506
507	return ret;
508}
509EXPORT_SYMBOL_GPL(clk_set_rate);
510
511int clk_set_parent(struct clk *clk, struct clk *parent)
512{
513	unsigned long flags;
514	int ret = -EINVAL;
515
516	if (!parent || !clk)
517		return ret;
518	if (clk->parent == parent)
519		return 0;
520
521	spin_lock_irqsave(&clock_lock, flags);
522	if (clk->usecount == 0) {
523		if (clk->ops->set_parent)
524			ret = clk->ops->set_parent(clk, parent);
525		else
526			ret = clk_reparent(clk, parent);
527
528		if (ret == 0) {
529			if (clk->ops->recalc)
530				clk->rate = clk->ops->recalc(clk);
531			pr_debug("set parent of %p to %p (new rate %ld)\n",
532				 clk, clk->parent, clk->rate);
533			propagate_rate(clk);
534		}
535	} else
536		ret = -EBUSY;
537	spin_unlock_irqrestore(&clock_lock, flags);
538
539	return ret;
540}
541EXPORT_SYMBOL_GPL(clk_set_parent);
542
543struct clk *clk_get_parent(struct clk *clk)
544{
545	if (!clk)
546		return NULL;
547
548	return clk->parent;
549}
550EXPORT_SYMBOL_GPL(clk_get_parent);
551
552long clk_round_rate(struct clk *clk, unsigned long rate)
553{
554	if (!clk)
555		return 0;
556
557	if (likely(clk->ops && clk->ops->round_rate)) {
558		unsigned long flags, rounded;
559
560		spin_lock_irqsave(&clock_lock, flags);
561		rounded = clk->ops->round_rate(clk, rate);
562		spin_unlock_irqrestore(&clock_lock, flags);
563
564		return rounded;
565	}
566
567	return clk_get_rate(clk);
568}
569EXPORT_SYMBOL_GPL(clk_round_rate);
 570
571#ifdef CONFIG_PM
572static void clks_core_resume(void)
573{
574	struct clk *clkp;
575
576	list_for_each_entry(clkp, &clock_list, node) {
577		if (likely(clkp->usecount && clkp->ops)) {
578			unsigned long rate = clkp->rate;
579
580			if (likely(clkp->ops->set_parent))
581				clkp->ops->set_parent(clkp,
582					clkp->parent);
583			if (likely(clkp->ops->set_rate))
584				clkp->ops->set_rate(clkp, rate);
585			else if (likely(clkp->ops->recalc))
586				clkp->rate = clkp->ops->recalc(clkp);
587		}
588	}
589}
590
591static struct syscore_ops clks_syscore_ops = {
592	.resume = clks_core_resume,
593};
594
595static int __init clk_syscore_init(void)
596{
597	register_syscore_ops(&clks_syscore_ops);
598
599	return 0;
600}
601subsys_initcall(clk_syscore_init);
 602#endif
603
604static int __init clk_late_init(void)
605{
606	unsigned long flags;
607	struct clk *clk;
608
609	/* disable all clocks with zero use count */
610	mutex_lock(&clock_list_sem);
611	spin_lock_irqsave(&clock_lock, flags);
612
613	list_for_each_entry(clk, &clock_list, node)
614		if (!clk->usecount && clk->ops && clk->ops->disable)
615			clk->ops->disable(clk);
616
617	/* from now on allow clock disable operations */
618	allow_disable = 1;
619
620	spin_unlock_irqrestore(&clock_lock, flags);
621	mutex_unlock(&clock_list_sem);
622	return 0;
623}
624late_initcall(clk_late_init);
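
As a consumer-side illustration of the API exported by the v6.8 code above (clk_enable(), clk_round_rate(), clk_set_rate(), clk_disable()), here is a minimal sketch of how a driver might use it. The my_clk handle, the 48 MHz target and the function name are assumptions for the example; obtaining the handle (e.g. through the clkdev lookup layer) happens outside this file.

#include <linux/clk.h>

/* Hedged usage sketch, not taken from this file: enable a clock, then
 * negotiate a rate with the framework.  Error handling kept minimal. */
static int example_setup_clock(struct clk *my_clk)
{
	long rounded;
	int ret;

	ret = clk_enable(my_clk);	/* bumps usecount, enabling parents first */
	if (ret)
		return ret;

	rounded = clk_round_rate(my_clk, 48000000);	/* what can the hardware actually do? */
	if (rounded <= 0)
		rounded = 48000000;	/* fall back to the requested rate */

	ret = clk_set_rate(my_clk, rounded);	/* recalcs this clock and propagates to children */
	if (ret)
		clk_disable(my_clk);	/* drop usecount; hardware disable only happens after clk_late_init() */

	return ret;
}
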
v3.1
  1/*
  2 * SuperH clock framework
  3 *
  4 *  Copyright (C) 2005 - 2010  Paul Mundt
  5 *
  6 * This clock framework is derived from the OMAP version by:
  7 *
  8 *	Copyright (C) 2004 - 2008 Nokia Corporation
  9 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 10 *
 11 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 12 *
 13 * This file is subject to the terms and conditions of the GNU General Public
 14 * License.  See the file "COPYING" in the main directory of this archive
 15 * for more details.
 16 */
 17#define pr_fmt(fmt) "clock: " fmt
 18
 19#include <linux/kernel.h>
 20#include <linux/init.h>
 21#include <linux/module.h>
 22#include <linux/mutex.h>
 23#include <linux/list.h>
 24#include <linux/syscore_ops.h>
 25#include <linux/seq_file.h>
 26#include <linux/err.h>
 27#include <linux/io.h>
 28#include <linux/debugfs.h>
 29#include <linux/cpufreq.h>
 30#include <linux/clk.h>
 31#include <linux/sh_clk.h>
 32
 33static LIST_HEAD(clock_list);
 34static DEFINE_SPINLOCK(clock_lock);
 35static DEFINE_MUTEX(clock_list_sem);
 36
 37/* clock disable operations are not passed on to hardware during boot */
 38static int allow_disable;
 39
 40void clk_rate_table_build(struct clk *clk,
 41			  struct cpufreq_frequency_table *freq_table,
 42			  int nr_freqs,
 43			  struct clk_div_mult_table *src_table,
 44			  unsigned long *bitmap)
 45{
 46	unsigned long mult, div;
 47	unsigned long freq;
 48	int i;
 49
 50	clk->nr_freqs = nr_freqs;
 51
 52	for (i = 0; i < nr_freqs; i++) {
 53		div = 1;
 54		mult = 1;
 55
 56		if (src_table->divisors && i < src_table->nr_divisors)
 57			div = src_table->divisors[i];
 58
 59		if (src_table->multipliers && i < src_table->nr_multipliers)
 60			mult = src_table->multipliers[i];
 61
 62		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
 63			freq = CPUFREQ_ENTRY_INVALID;
 64		else
 65			freq = clk->parent->rate * mult / div;
 66
 67		freq_table[i].index = i;
 68		freq_table[i].frequency = freq;
 69	}
 70
 71	/* Termination entry */
 72	freq_table[i].index = i;
 73	freq_table[i].frequency = CPUFREQ_TABLE_END;
 74}
 75
 76struct clk_rate_round_data;
 77
 78struct clk_rate_round_data {
 79	unsigned long rate;
 80	unsigned int min, max;
 81	long (*func)(unsigned int, struct clk_rate_round_data *);
 82	void *arg;
 83};
 84
 85#define for_each_frequency(pos, r, freq)			\
 86	for (pos = r->min, freq = r->func(pos, r);		\
 87	     pos <= r->max; pos++, freq = r->func(pos, r))	\
 88		if (unlikely(freq == 0))			\
 89			;					\
 90		else
 91
 92static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
 93{
 94	unsigned long rate_error, rate_error_prev = ~0UL;
 95	unsigned long highest, lowest, freq;
 96	long rate_best_fit = -ENOENT;
 97	int i;
 98
 99	highest = 0;
100	lowest = ~0UL;
101
102	for_each_frequency(i, rounder, freq) {
103		if (freq > highest)
104			highest = freq;
105		if (freq < lowest)
106			lowest = freq;
107
108		rate_error = abs(freq - rounder->rate);
109		if (rate_error < rate_error_prev) {
110			rate_best_fit = freq;
111			rate_error_prev = rate_error;
112		}
113
114		if (rate_error == 0)
115			break;
116	}
117
118	if (rounder->rate >= highest)
119		rate_best_fit = highest;
120	if (rounder->rate <= lowest)
121		rate_best_fit = lowest;
122
123	return rate_best_fit;
124}
125
126static long clk_rate_table_iter(unsigned int pos,
127				struct clk_rate_round_data *rounder)
128{
129	struct cpufreq_frequency_table *freq_table = rounder->arg;
130	unsigned long freq = freq_table[pos].frequency;
131
132	if (freq == CPUFREQ_ENTRY_INVALID)
133		freq = 0;
134
135	return freq;
136}
137
138long clk_rate_table_round(struct clk *clk,
139			  struct cpufreq_frequency_table *freq_table,
140			  unsigned long rate)
141{
142	struct clk_rate_round_data table_round = {
143		.min	= 0,
144		.max	= clk->nr_freqs - 1,
145		.func	= clk_rate_table_iter,
146		.arg	= freq_table,
147		.rate	= rate,
148	};
149
150	if (clk->nr_freqs < 1)
151		return -ENOSYS;
152
153	return clk_rate_round_helper(&table_round);
154}
155
156static long clk_rate_div_range_iter(unsigned int pos,
157				    struct clk_rate_round_data *rounder)
158{
159	return clk_get_rate(rounder->arg) / pos;
160}
161
162long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
163			      unsigned int div_max, unsigned long rate)
164{
165	struct clk_rate_round_data div_range_round = {
166		.min	= div_min,
167		.max	= div_max,
168		.func	= clk_rate_div_range_iter,
169		.arg	= clk_get_parent(clk),
170		.rate	= rate,
171	};
172
173	return clk_rate_round_helper(&div_range_round);
174}
 175
176int clk_rate_table_find(struct clk *clk,
177			struct cpufreq_frequency_table *freq_table,
178			unsigned long rate)
179{
180	int i;
181
182	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
183		unsigned long freq = freq_table[i].frequency;
184
185		if (freq == CPUFREQ_ENTRY_INVALID)
186			continue;
187
188		if (freq == rate)
189			return i;
190	}
191
192	return -ENOENT;
193}
194
195/* Used for clocks that always have same value as the parent clock */
196unsigned long followparent_recalc(struct clk *clk)
197{
198	return clk->parent ? clk->parent->rate : 0;
199}
200
201int clk_reparent(struct clk *child, struct clk *parent)
202{
203	list_del_init(&child->sibling);
204	if (parent)
205		list_add(&child->sibling, &parent->children);
206	child->parent = parent;
207
208	/* now do the debugfs renaming to reattach the child
209	   to the proper parent */
210
211	return 0;
212}
213
214/* Propagate rate to children */
215void propagate_rate(struct clk *tclk)
216{
217	struct clk *clkp;
218
219	list_for_each_entry(clkp, &tclk->children, sibling) {
220		if (clkp->ops && clkp->ops->recalc)
221			clkp->rate = clkp->ops->recalc(clkp);
222
223		propagate_rate(clkp);
224	}
225}
226
227static void __clk_disable(struct clk *clk)
228{
229	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
230		 clk))
231		return;
232
233	if (!(--clk->usecount)) {
234		if (likely(allow_disable && clk->ops && clk->ops->disable))
235			clk->ops->disable(clk);
236		if (likely(clk->parent))
237			__clk_disable(clk->parent);
238	}
239}
240
241void clk_disable(struct clk *clk)
242{
243	unsigned long flags;
244
245	if (!clk)
246		return;
247
248	spin_lock_irqsave(&clock_lock, flags);
249	__clk_disable(clk);
250	spin_unlock_irqrestore(&clock_lock, flags);
251}
252EXPORT_SYMBOL_GPL(clk_disable);
253
254static int __clk_enable(struct clk *clk)
255{
256	int ret = 0;
257
258	if (clk->usecount++ == 0) {
259		if (clk->parent) {
260			ret = __clk_enable(clk->parent);
261			if (unlikely(ret))
262				goto err;
263		}
264
265		if (clk->ops && clk->ops->enable) {
266			ret = clk->ops->enable(clk);
267			if (ret) {
268				if (clk->parent)
269					__clk_disable(clk->parent);
270				goto err;
271			}
272		}
273	}
274
275	return ret;
276err:
277	clk->usecount--;
278	return ret;
279}
280
281int clk_enable(struct clk *clk)
282{
283	unsigned long flags;
284	int ret;
285
286	if (!clk)
287		return -EINVAL;
288
289	spin_lock_irqsave(&clock_lock, flags);
290	ret = __clk_enable(clk);
291	spin_unlock_irqrestore(&clock_lock, flags);
292
293	return ret;
294}
295EXPORT_SYMBOL_GPL(clk_enable);
296
297static LIST_HEAD(root_clks);
298
299/**
300 * recalculate_root_clocks - recalculate and propagate all root clocks
301 *
302 * Recalculates all root clocks (clocks with no parent), which if the
303 * clock's .recalc is set correctly, should also propagate their rates.
304 * Called at init.
305 */
306void recalculate_root_clocks(void)
307{
308	struct clk *clkp;
309
310	list_for_each_entry(clkp, &root_clks, sibling) {
311		if (clkp->ops && clkp->ops->recalc)
312			clkp->rate = clkp->ops->recalc(clkp);
313		propagate_rate(clkp);
314	}
315}
316
317static struct clk_mapping dummy_mapping;
318
319static struct clk *lookup_root_clock(struct clk *clk)
320{
321	while (clk->parent)
322		clk = clk->parent;
323
324	return clk;
325}
326
327static int clk_establish_mapping(struct clk *clk)
328{
329	struct clk_mapping *mapping = clk->mapping;
330
331	/*
332	 * Propagate mappings.
333	 */
334	if (!mapping) {
335		struct clk *clkp;
336
337		/*
338		 * dummy mapping for root clocks with no specified ranges
339		 */
340		if (!clk->parent) {
341			clk->mapping = &dummy_mapping;
342			return 0;
343		}
344
345		/*
346		 * If we're on a child clock and it provides no mapping of its
347		 * own, inherit the mapping from its root clock.
348		 */
349		clkp = lookup_root_clock(clk);
350		mapping = clkp->mapping;
351		BUG_ON(!mapping);
352	}
353
354	/*
355	 * Establish initial mapping.
356	 */
357	if (!mapping->base && mapping->phys) {
358		kref_init(&mapping->ref);
359
360		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
361		if (unlikely(!mapping->base))
362			return -ENXIO;
363	} else if (mapping->base) {
364		/*
365		 * Bump the refcount for an existing mapping
366		 */
367		kref_get(&mapping->ref);
368	}
369
 370	clk->mapping = mapping;
371	return 0;
372}
373
374static void clk_destroy_mapping(struct kref *kref)
375{
376	struct clk_mapping *mapping;
377
378	mapping = container_of(kref, struct clk_mapping, ref);
379
380	iounmap(mapping->base);
381}
382
383static void clk_teardown_mapping(struct clk *clk)
384{
385	struct clk_mapping *mapping = clk->mapping;
386
387	/* Nothing to do */
388	if (mapping == &dummy_mapping)
389		return;
390
391	kref_put(&mapping->ref, clk_destroy_mapping);
 392	clk->mapping = NULL;
393}
394
395int clk_register(struct clk *clk)
396{
397	int ret;
398
399	if (IS_ERR_OR_NULL(clk))
400		return -EINVAL;
401
402	/*
403	 * trap out already registered clocks
404	 */
405	if (clk->node.next || clk->node.prev)
406		return 0;
407
408	mutex_lock(&clock_list_sem);
409
410	INIT_LIST_HEAD(&clk->children);
411	clk->usecount = 0;
412
413	ret = clk_establish_mapping(clk);
414	if (unlikely(ret))
415		goto out_unlock;
416
417	if (clk->parent)
418		list_add(&clk->sibling, &clk->parent->children);
419	else
420		list_add(&clk->sibling, &root_clks);
421
422	list_add(&clk->node, &clock_list);
423
424#ifdef CONFIG_SH_CLK_CPG_LEGACY
425	if (clk->ops && clk->ops->init)
426		clk->ops->init(clk);
427#endif
428
429out_unlock:
430	mutex_unlock(&clock_list_sem);
431
432	return ret;
433}
434EXPORT_SYMBOL_GPL(clk_register);
435
436void clk_unregister(struct clk *clk)
437{
438	mutex_lock(&clock_list_sem);
439	list_del(&clk->sibling);
440	list_del(&clk->node);
441	clk_teardown_mapping(clk);
442	mutex_unlock(&clock_list_sem);
443}
444EXPORT_SYMBOL_GPL(clk_unregister);
445
446void clk_enable_init_clocks(void)
447{
448	struct clk *clkp;
449
450	list_for_each_entry(clkp, &clock_list, node)
451		if (clkp->flags & CLK_ENABLE_ON_INIT)
452			clk_enable(clkp);
453}
454
455unsigned long clk_get_rate(struct clk *clk)
 456{
457	return clk->rate;
458}
459EXPORT_SYMBOL_GPL(clk_get_rate);
460
461int clk_set_rate(struct clk *clk, unsigned long rate)
462{
463	int ret = -EOPNOTSUPP;
464	unsigned long flags;
 465
466	spin_lock_irqsave(&clock_lock, flags);
467
468	if (likely(clk->ops && clk->ops->set_rate)) {
469		ret = clk->ops->set_rate(clk, rate);
470		if (ret != 0)
471			goto out_unlock;
472	} else {
473		clk->rate = rate;
474		ret = 0;
475	}
476
477	if (clk->ops && clk->ops->recalc)
478		clk->rate = clk->ops->recalc(clk);
479
480	propagate_rate(clk);
481
482out_unlock:
483	spin_unlock_irqrestore(&clock_lock, flags);
484
485	return ret;
486}
487EXPORT_SYMBOL_GPL(clk_set_rate);
488
489int clk_set_parent(struct clk *clk, struct clk *parent)
490{
491	unsigned long flags;
492	int ret = -EINVAL;
493
494	if (!parent || !clk)
495		return ret;
496	if (clk->parent == parent)
497		return 0;
498
499	spin_lock_irqsave(&clock_lock, flags);
500	if (clk->usecount == 0) {
501		if (clk->ops->set_parent)
502			ret = clk->ops->set_parent(clk, parent);
503		else
504			ret = clk_reparent(clk, parent);
505
506		if (ret == 0) {
507			if (clk->ops->recalc)
508				clk->rate = clk->ops->recalc(clk);
509			pr_debug("set parent of %p to %p (new rate %ld)\n",
510				 clk, clk->parent, clk->rate);
511			propagate_rate(clk);
512		}
513	} else
514		ret = -EBUSY;
515	spin_unlock_irqrestore(&clock_lock, flags);
516
517	return ret;
518}
519EXPORT_SYMBOL_GPL(clk_set_parent);
520
521struct clk *clk_get_parent(struct clk *clk)
 522{
523	return clk->parent;
524}
525EXPORT_SYMBOL_GPL(clk_get_parent);
526
527long clk_round_rate(struct clk *clk, unsigned long rate)
 528{
529	if (likely(clk->ops && clk->ops->round_rate)) {
530		unsigned long flags, rounded;
531
532		spin_lock_irqsave(&clock_lock, flags);
533		rounded = clk->ops->round_rate(clk, rate);
534		spin_unlock_irqrestore(&clock_lock, flags);
535
536		return rounded;
537	}
538
539	return clk_get_rate(clk);
540}
541EXPORT_SYMBOL_GPL(clk_round_rate);
542
543long clk_round_parent(struct clk *clk, unsigned long target,
544		      unsigned long *best_freq, unsigned long *parent_freq,
545		      unsigned int div_min, unsigned int div_max)
546{
547	struct cpufreq_frequency_table *freq, *best = NULL;
548	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
549	struct clk *parent = clk_get_parent(clk);
550
551	if (!parent) {
552		*parent_freq = 0;
553		*best_freq = clk_round_rate(clk, target);
554		return abs(target - *best_freq);
555	}
556
557	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
558	     freq++) {
559		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
560			continue;
561
562		if (unlikely(freq->frequency / target <= div_min - 1)) {
563			unsigned long freq_max;
564
565			freq_max = (freq->frequency + div_min / 2) / div_min;
566			if (error > target - freq_max) {
567				error = target - freq_max;
568				best = freq;
569				if (best_freq)
570					*best_freq = freq_max;
571			}
572
573			pr_debug("too low freq %u, error %lu\n", freq->frequency,
574				 target - freq_max);
575
576			if (!error)
577				break;
578
579			continue;
580		}
581
582		if (unlikely(freq->frequency / target >= div_max)) {
583			unsigned long freq_min;
584
585			freq_min = (freq->frequency + div_max / 2) / div_max;
586			if (error > freq_min - target) {
587				error = freq_min - target;
588				best = freq;
589				if (best_freq)
590					*best_freq = freq_min;
591			}
592
593			pr_debug("too high freq %u, error %lu\n", freq->frequency,
594				 freq_min - target);
595
596			if (!error)
597				break;
598
599			continue;
600		}
601
602		div = freq->frequency / target;
603		freq_high = freq->frequency / div;
604		freq_low = freq->frequency / (div + 1);
605
606		if (freq_high - target < error) {
607			error = freq_high - target;
608			best = freq;
609			if (best_freq)
610				*best_freq = freq_high;
611		}
612
613		if (target - freq_low < error) {
614			error = target - freq_low;
615			best = freq;
616			if (best_freq)
617				*best_freq = freq_low;
618		}
619
620		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
621			 freq->frequency, div, freq_high, div + 1, freq_low,
622			 *best_freq, best->frequency);
623
624		if (!error)
625			break;
626	}
627
628	if (parent_freq)
629		*parent_freq = best->frequency;
630
631	return error;
632}
633EXPORT_SYMBOL_GPL(clk_round_parent);
634
635#ifdef CONFIG_PM
636static void clks_core_resume(void)
637{
638	struct clk *clkp;
639
640	list_for_each_entry(clkp, &clock_list, node) {
641		if (likely(clkp->usecount && clkp->ops)) {
642			unsigned long rate = clkp->rate;
643
644			if (likely(clkp->ops->set_parent))
645				clkp->ops->set_parent(clkp,
646					clkp->parent);
647			if (likely(clkp->ops->set_rate))
648				clkp->ops->set_rate(clkp, rate);
649			else if (likely(clkp->ops->recalc))
650				clkp->rate = clkp->ops->recalc(clkp);
651		}
652	}
653}
654
655static struct syscore_ops clks_syscore_ops = {
656	.resume = clks_core_resume,
657};
658
659static int __init clk_syscore_init(void)
660{
661	register_syscore_ops(&clks_syscore_ops);
662
663	return 0;
664}
665subsys_initcall(clk_syscore_init);
666#endif
667
668/*
669 *	debugfs support to trace clock tree hierarchy and attributes
670 */
671static struct dentry *clk_debugfs_root;
672
673static int clk_debugfs_register_one(struct clk *c)
674{
675	int err;
676	struct dentry *d;
677	struct clk *pa = c->parent;
678	char s[255];
679	char *p = s;
680
681	p += sprintf(p, "%p", c);
682	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
683	if (!d)
684		return -ENOMEM;
685	c->dentry = d;
686
687	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
688	if (!d) {
689		err = -ENOMEM;
690		goto err_out;
691	}
692	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
693	if (!d) {
694		err = -ENOMEM;
695		goto err_out;
696	}
697	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
698	if (!d) {
699		err = -ENOMEM;
700		goto err_out;
701	}
702	return 0;
703
704err_out:
705	debugfs_remove_recursive(c->dentry);
706	return err;
707}
708
709static int clk_debugfs_register(struct clk *c)
710{
711	int err;
712	struct clk *pa = c->parent;
713
714	if (pa && !pa->dentry) {
715		err = clk_debugfs_register(pa);
716		if (err)
717			return err;
718	}
719
720	if (!c->dentry) {
721		err = clk_debugfs_register_one(c);
722		if (err)
723			return err;
724	}
725	return 0;
726}
727
728static int __init clk_debugfs_init(void)
729{
730	struct clk *c;
731	struct dentry *d;
732	int err;
733
734	d = debugfs_create_dir("clock", NULL);
735	if (!d)
736		return -ENOMEM;
737	clk_debugfs_root = d;
738
739	list_for_each_entry(c, &clock_list, node) {
740		err = clk_debugfs_register(c);
741		if (err)
742			goto err_out;
743	}
744	return 0;
745err_out:
746	debugfs_remove_recursive(clk_debugfs_root);
747	return err;
748}
749late_initcall(clk_debugfs_init);
750
751static int __init clk_late_init(void)
752{
753	unsigned long flags;
754	struct clk *clk;
755
756	/* disable all clocks with zero use count */
757	mutex_lock(&clock_list_sem);
758	spin_lock_irqsave(&clock_lock, flags);
759
760	list_for_each_entry(clk, &clock_list, node)
761		if (!clk->usecount && clk->ops && clk->ops->disable)
762			clk->ops->disable(clk);
763
764	/* from now on allow clock disable operations */
765	allow_disable = 1;
766
767	spin_unlock_irqrestore(&clock_lock, flags);
768	mutex_unlock(&clock_list_sem);
769	return 0;
770}
771late_initcall(clk_late_init);
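
On the provider side, platform code describes clocks as struct clk instances and hands them to clk_register(). The sketch below is illustrative only: the extal_clk root, its 33.33 MHz rate, the ops structure and the init function are assumptions; followparent_recalc(), clk_register(), recalculate_root_clocks(), clk_enable_init_clocks() and the CLK_ENABLE_ON_INIT flag come from the code listed above.

#include <linux/clk.h>
#include <linux/sh_clk.h>

/* A hypothetical fixed-rate root clock; clk_register() treats clocks
 * without a parent as roots and gives them the dummy mapping. */
static struct clk extal_clk = {
	.rate		= 33333333,
};

static struct clk_ops follow_clk_ops = {
	.recalc		= followparent_recalc,	/* rate simply tracks the parent (defined above) */
};

/* A child clock that mirrors extal_clk and is enabled during init. */
static struct clk peripheral_clk = {
	.parent		= &extal_clk,
	.ops		= &follow_clk_ops,
	.flags		= CLK_ENABLE_ON_INIT,	/* honoured by clk_enable_init_clocks() */
};

/* Assumed to be called from the platform's clock setup path. */
static int __init example_platform_clk_setup(void)
{
	int ret;

	ret = clk_register(&extal_clk);
	if (!ret)
		ret = clk_register(&peripheral_clk);
	if (!ret) {
		recalculate_root_clocks();	/* refresh rates from the roots down */
		clk_enable_init_clocks();	/* enable everything flagged CLK_ENABLE_ON_INIT */
	}

	return ret;
}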