/*
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <mach/clk.h>

#include "board.h"
#include "clock.h"

/*
 * Locking:
 *
 * Each struct clk has a spinlock.
 *
 * To avoid AB-BA locking problems, locks must always be traversed from child
 * clock to parent clock.  For example, when enabling a clock, the clock's lock
 * is taken, and then clk_enable is called on the parent, which takes the
 * parent clock's lock.  There is one exception to this ordering: when dumping
 * the clock tree through debugfs.  In this case, clk_lock_all is called,
 * which attempts to iterate through the entire list of clocks and take every
 * clock lock.  If any call to spin_trylock fails, all locked clocks are
 * unlocked, and the process is retried.  When all the locks are held,
 * the only clock operation that can be called is clk_get_rate_all_locked.
 *
 * Within a single clock, no clock operation can call another clock operation
 * on itself, except for clk_get_rate_locked and clk_set_rate_locked.  Any
 * clock operation can call any other clock operation on any of its possible
 * parents.
 *
 * An additional mutex, clock_list_lock, is used to protect the list of all
 * clocks.
 *
 * The clock operations must lock internally to protect against
 * read-modify-write on registers that are shared by multiple clocks.
 */
static DEFINE_MUTEX(clock_list_lock);
static LIST_HEAD(clocks);

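/*
 * Illustrative sketch (not part of the original file): the child-to-parent
 * lock ordering described above means an enable on a leaf clock nests its
 * ancestors' locks in one fixed direction, so two CPUs enabling different
 * children of the same parent cannot deadlock:
 *
 *	clk_enable(child)
 *	    spin_lock_irqsave(&child->spinlock, ...)
 *	    clk_enable(parent)
 *	        spin_lock_irqsave(&parent->spinlock, ...)
 *	        spin_unlock_irqrestore(&parent->spinlock, ...)
 *	    spin_unlock_irqrestore(&child->spinlock, ...)
 */
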
struct clk *tegra_get_clock_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;
	mutex_lock(&clock_list_lock);
	list_for_each_entry(c, &clocks, node) {
		if (strcmp(c->name, name) == 0) {
			ret = c;
			break;
		}
	}
	mutex_unlock(&clock_list_lock);
	return ret;
}

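/*
 * Example (illustrative only, not from the original file): board code can
 * look up a clock by its string name.  The name "uarta" is an assumption
 * used purely for illustration; it must match a name registered through
 * clk_init() below.
 *
 *	struct clk *uart_clk = tegra_get_clock_by_name("uarta");
 *
 *	if (!uart_clk)
 *		pr_err("uarta clock not registered\n");
 */
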
/* Must be called with c->spinlock held */
static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
{
	u64 rate;

	rate = clk_get_rate(p);

	if (c->mul != 0 && c->div != 0) {
		rate *= c->mul;
		rate += c->div - 1; /* round up */
		do_div(rate, c->div);
	}

	return rate;
}

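/*
 * Worked example (illustrative): with a parent rate of 100 MHz, mul = 1 and
 * div = 3, the computation above yields (100000000 * 1 + 2) / 3 = 33333334,
 * i.e. the exact quotient 33333333.33... rounded up to the next integer.
 */
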
/* Must be called with c->spinlock held */
unsigned long clk_get_rate_locked(struct clk *c)
{
	unsigned long rate;

	if (c->parent)
		rate = clk_predict_rate_from_parent(c, c->parent);
	else
		rate = c->rate;

	return rate;
}

unsigned long clk_get_rate(struct clk *c)
{
	unsigned long flags;
	unsigned long rate;

	spin_lock_irqsave(&c->spinlock, flags);

	rate = clk_get_rate_locked(c);

	spin_unlock_irqrestore(&c->spinlock, flags);

	return rate;
}
EXPORT_SYMBOL(clk_get_rate);

int clk_reparent(struct clk *c, struct clk *parent)
{
	c->parent = parent;
	return 0;
}

void clk_init(struct clk *c)
{
	spin_lock_init(&c->spinlock);

	if (c->ops && c->ops->init)
		c->ops->init(c);

	if (!c->ops || !c->ops->enable) {
		c->refcnt++;
		c->set = true;
		if (c->parent)
			c->state = c->parent->state;
		else
			c->state = ON;
	}

	mutex_lock(&clock_list_lock);
	list_add(&c->node, &clocks);
	mutex_unlock(&clock_list_lock);
}

int clk_enable(struct clk *c)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&c->spinlock, flags);

	if (c->refcnt == 0) {
		if (c->parent) {
			ret = clk_enable(c->parent);
			if (ret)
				goto out;
		}

		if (c->ops && c->ops->enable) {
			ret = c->ops->enable(c);
			if (ret) {
				if (c->parent)
					clk_disable(c->parent);
				goto out;
			}
			c->state = ON;
			c->set = true;
		}
	}
	c->refcnt++;
out:
	spin_unlock_irqrestore(&c->spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->spinlock, flags);

	if (c->refcnt == 0) {
		WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
		spin_unlock_irqrestore(&c->spinlock, flags);
		return;
	}
	if (c->refcnt == 1) {
		if (c->ops && c->ops->disable)
			c->ops->disable(c);

		if (c->parent)
			clk_disable(c->parent);

		c->state = OFF;
	}
	c->refcnt--;

	spin_unlock_irqrestore(&c->spinlock, flags);
}
EXPORT_SYMBOL(clk_disable);

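/*
 * Example (illustrative only): clk_enable()/clk_disable() are refcounted,
 * so nested callers must be balanced.  Only the first enable and the last
 * disable touch the hardware (and propagate to the parent clock):
 *
 *	clk_enable(c);		refcnt 0 -> 1, hardware enabled
 *	clk_enable(c);		refcnt 1 -> 2, no hardware access
 *	clk_disable(c);		refcnt 2 -> 1, no hardware access
 *	clk_disable(c);		refcnt 1 -> 0, hardware disabled
 */
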
int clk_set_parent(struct clk *c, struct clk *parent)
{
	int ret;
	unsigned long flags;
	unsigned long new_rate;
	unsigned long old_rate;

	spin_lock_irqsave(&c->spinlock, flags);

	if (!c->ops || !c->ops->set_parent) {
		ret = -ENOSYS;
		goto out;
	}

	new_rate = clk_predict_rate_from_parent(c, parent);
	old_rate = clk_get_rate_locked(c);

	ret = c->ops->set_parent(c, parent);
	if (ret)
		goto out;

out:
	spin_unlock_irqrestore(&c->spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *c)
{
	return c->parent;
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_rate_locked(struct clk *c, unsigned long rate)
{
	long new_rate;

	if (!c->ops || !c->ops->set_rate)
		return -ENOSYS;

	if (rate > c->max_rate)
		rate = c->max_rate;

	if (c->ops && c->ops->round_rate) {
		new_rate = c->ops->round_rate(c, rate);

		if (new_rate < 0)
			return new_rate;

		rate = new_rate;
	}

	return c->ops->set_rate(c, rate);
}

int clk_set_rate(struct clk *c, unsigned long rate)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&c->spinlock, flags);

	ret = clk_set_rate_locked(c, rate);

	spin_unlock_irqrestore(&c->spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

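/*
 * Example (illustrative only): a caller simply requests a rate; the request
 * is clamped to max_rate and rounded by the clock's round_rate op before the
 * hardware is programmed, so the rate actually achieved should be read back
 * with clk_get_rate():
 *
 *	if (clk_set_rate(c, 48000000) == 0)
 *		pr_info("%s running at %lu Hz\n", c->name, clk_get_rate(c));
 */
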
/* Must be called with the clock list lock and all individual clock locks held */
unsigned long clk_get_rate_all_locked(struct clk *c)
{
	u64 rate;
	int mul = 1;
	int div = 1;
	struct clk *p = c;

	while (p) {
		c = p;
		if (c->mul != 0 && c->div != 0) {
			mul *= c->mul;
			div *= c->div;
		}
		p = c->parent;
	}

	rate = c->rate;
	rate *= mul;
	do_div(rate, div);

	return rate;
}

long clk_round_rate(struct clk *c, unsigned long rate)
{
	unsigned long flags;
	long ret;

	spin_lock_irqsave(&c->spinlock, flags);

	if (!c->ops || !c->ops->round_rate) {
		ret = -ENOSYS;
		goto out;
	}

	if (rate > c->max_rate)
		rate = c->max_rate;

	ret = c->ops->round_rate(c, rate);

out:
	spin_unlock_irqrestore(&c->spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

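/*
 * Example (illustrative only): clk_round_rate() lets a caller check what
 * rate the hardware can actually deliver before committing to it:
 *
 *	long rounded = clk_round_rate(c, 20000000);
 *
 *	if (rounded > 0 && rounded <= 20000000)
 *		clk_set_rate(c, rounded);
 */
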
static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
{
	struct clk *c;
	struct clk *p;

	int ret = 0;

	c = tegra_get_clock_by_name(table->name);

	if (!c) {
		pr_warning("Unable to initialize clock %s\n",
			table->name);
		return -ENODEV;
	}

	if (table->parent) {
		p = tegra_get_clock_by_name(table->parent);
		if (!p) {
			pr_warning("Unable to find parent %s of clock %s\n",
				table->parent, table->name);
			return -ENODEV;
		}

		if (c->parent != p) {
			ret = clk_set_parent(c, p);
			if (ret) {
				pr_warning("Unable to set parent %s of clock %s: %d\n",
					table->parent, table->name, ret);
				return -EINVAL;
			}
		}
	}

	if (table->rate && table->rate != clk_get_rate(c)) {
		ret = clk_set_rate(c, table->rate);
		if (ret) {
			pr_warning("Unable to set clock %s to rate %lu: %d\n",
				table->name, table->rate, ret);
			return -EINVAL;
		}
	}

	if (table->enabled) {
		ret = clk_enable(c);
		if (ret) {
			pr_warning("Unable to enable clock %s: %d\n",
				table->name, ret);
			return -EINVAL;
		}
	}

	return 0;
}

void tegra_clk_init_from_table(struct tegra_clk_init_table *table)
{
	for (; table->name; table++)
		tegra_clk_init_one_from_table(table);
}
EXPORT_SYMBOL(tegra_clk_init_from_table);

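/*
 * Example (illustrative sketch): board code passes a table terminated by a
 * NULL name; each entry optionally reparents and sets a rate, and optionally
 * enables the clock, matching the fields read above (name, parent, rate,
 * enabled).  The clock and parent names below are assumptions chosen for
 * illustration only.
 *
 *	static struct tegra_clk_init_table board_clk_table[] __initdata = {
 *		// name      parent   rate       enabled
 *		{ "uarta",   "pll_p", 216000000, true },
 *		{ "sdmmc1",  "pll_p", 48000000,  false },
 *		{ NULL,      NULL,    0,         0 },
 *	};
 *
 *	tegra_clk_init_from_table(board_clk_table);
 */
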
void tegra_periph_reset_deassert(struct clk *c)
{
	tegra2_periph_reset_deassert(c);
}
EXPORT_SYMBOL(tegra_periph_reset_deassert);

void tegra_periph_reset_assert(struct clk *c)
{
	tegra2_periph_reset_assert(c);
}
EXPORT_SYMBOL(tegra_periph_reset_assert);

void __init tegra_init_clock(void)
{
	tegra2_init_clocks();
}

/*
 * The SDMMC controllers have extra bits in the clock source register that
 * adjust the delay between the clock and data to compensate for delays
 * on the PCB.
 */
void tegra_sdmmc_tap_delay(struct clk *c, int delay)
{
	unsigned long flags;

	spin_lock_irqsave(&c->spinlock, flags);
	tegra2_sdmmc_tap_delay(c, delay);
	spin_unlock_irqrestore(&c->spinlock, flags);
}

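/*
 * Example (illustrative only): an SDMMC host driver would apply a
 * board-specific tap value to its own clock.  Both the clock handle and
 * the delay value here are assumptions for illustration.
 *
 *	tegra_sdmmc_tap_delay(sdmmc_clk, 6);
 */
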
#ifdef CONFIG_DEBUG_FS

static int __clk_lock_all_spinlocks(void)
{
	struct clk *c;

	list_for_each_entry(c, &clocks, node)
		if (!spin_trylock(&c->spinlock))
			goto unlock_spinlocks;

	return 0;

unlock_spinlocks:
	list_for_each_entry_continue_reverse(c, &clocks, node)
		spin_unlock(&c->spinlock);

	return -EAGAIN;
}

static void __clk_unlock_all_spinlocks(void)
{
	struct clk *c;

	list_for_each_entry_reverse(c, &clocks, node)
		spin_unlock(&c->spinlock);
}

/*
 * This function retries until it can take all locks, and may take
 * an arbitrarily long time to complete.
 * Must be called with irqs enabled, returns with irqs disabled.
 * Must be called with clock_list_lock held.
 */
static void clk_lock_all(void)
{
	int ret;
retry:
	local_irq_disable();

	ret = __clk_lock_all_spinlocks();
	if (ret)
		goto failed_spinlocks;

	/* All locks taken successfully, return */
	return;

failed_spinlocks:
	local_irq_enable();
	yield();
	goto retry;
}

/*
 * Unlocks all clocks after a clk_lock_all.
 * Must be called with irqs disabled, returns with irqs enabled.
 * Must be called with clock_list_lock held.
 */
static void clk_unlock_all(void)
{
	__clk_unlock_all_spinlocks();

	local_irq_enable();
}

static struct dentry *clk_debugfs_root;

static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
	struct clk *child;
	const char *state = "uninit";
	char div[8] = {0};

	if (c->state == ON)
		state = "on";
	else if (c->state == OFF)
		state = "off";

	if (c->mul != 0 && c->div != 0) {
		if (c->mul > c->div) {
			int mul = c->mul / c->div;
			int mul2 = (c->mul * 10 / c->div) % 10;
			int mul3 = (c->mul * 10) % c->div;
			if (mul2 == 0 && mul3 == 0)
				snprintf(div, sizeof(div), "x%d", mul);
			else if (mul3 == 0)
				snprintf(div, sizeof(div), "x%d.%d", mul, mul2);
			else
				snprintf(div, sizeof(div), "x%d.%d..", mul, mul2);
		} else {
			snprintf(div, sizeof(div), "%d%s", c->div / c->mul,
				(c->div % c->mul) ? ".5" : "");
		}
	}

	seq_printf(s, "%*s%c%c%-*s %-6s %-3d %-8s %-10lu\n",
		level * 3 + 1, "",
		c->rate > c->max_rate ? '!' : ' ',
		!c->set ? '*' : ' ',
		30 - level * 3, c->name,
		state, c->refcnt, div, clk_get_rate_all_locked(c));

	list_for_each_entry(child, &clocks, node) {
		if (child->parent != c)
			continue;

		clock_tree_show_one(s, child, level + 1);
	}
}

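/*
 * Worked example (illustrative): for a clock with mul = 3 and div = 2 the
 * div column above reads "x1.5" (a 1.5x multiplier); for a divider with
 * mul = 2 and div = 5 it reads "2.5" (divide by 2.5).
 */
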
static int clock_tree_show(struct seq_file *s, void *data)
{
	struct clk *c;
	seq_printf(s, "   clock                          state  ref div      rate\n");
	seq_printf(s, "--------------------------------------------------------------\n");

	mutex_lock(&clock_list_lock);

	clk_lock_all();

	list_for_each_entry(c, &clocks, node)
		if (c->parent == NULL)
			clock_tree_show_one(s, c, 0);

	clk_unlock_all();

	mutex_unlock(&clock_list_lock);
	return 0;
}

static int clock_tree_open(struct inode *inode, struct file *file)
{
	return single_open(file, clock_tree_show, inode->i_private);
}

static const struct file_operations clock_tree_fops = {
	.open		= clock_tree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

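/*
 * Example (illustrative only): with debugfs mounted at its usual location,
 * the tree created by clk_debugfs_init() below can be dumped from userspace;
 * the columns match the seq_printf format above (name, state, refcount,
 * mul/div, rate):
 *
 *	# cat /sys/kernel/debug/clock/clock_tree
 *	   clock                          state  ref div      rate
 *	--------------------------------------------------------------
 */
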
static int possible_parents_show(struct seq_file *s, void *data)
{
	struct clk *c = s->private;
	int i;

	for (i = 0; c->inputs[i].input; i++) {
		char *first = (i == 0) ? "" : " ";
		seq_printf(s, "%s%s", first, c->inputs[i].input->name);
	}
	seq_printf(s, "\n");
	return 0;
}

static int possible_parents_open(struct inode *inode, struct file *file)
{
	return single_open(file, possible_parents_show, inode->i_private);
}

static const struct file_operations possible_parents_fops = {
	.open		= possible_parents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int clk_debugfs_register_one(struct clk *c)
{
	struct dentry *d;

	d = debugfs_create_dir(c->name, clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("refcnt", S_IRUGO, c->dent, (u8 *)&c->refcnt);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d)
		goto err_out;

	if (c->inputs) {
		d = debugfs_create_file("possible_parents", S_IRUGO, c->dent,
			c, &possible_parents_fops);
		if (!d)
			goto err_out;
	}

	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return -ENOMEM;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err = -ENOMEM;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	d = debugfs_create_file("clock_tree", S_IRUGO, clk_debugfs_root, NULL,
		&clock_tree_fops);
	if (!d)
		goto err_out;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}

late_initcall(clk_debugfs_init);
#endif