Note: File does not exist in v6.9.4.
/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>

#include <mach/clock.h>
#include "psc.h"
#include <mach/cputype.h>
#include "clock.h"

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

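/*
 * davinci_clk_enable()/davinci_clk_disable() walk up the parent chain and
 * reference-count each clock through ->usecount; the PSC gate (or the
 * clock's own clk_enable/clk_disable hook) is only touched on the 0 -> 1
 * and 1 -> 0 transitions.  Both are called with clockfw_lock held (see
 * clk_enable() and clk_disable() below).
 */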
void davinci_clk_enable(struct clk *clk)
{
	if (clk->parent)
		davinci_clk_enable(clk->parent);
	if (clk->usecount++ == 0) {
		if (clk->flags & CLK_PSC)
			davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
					   true, clk->flags);
		else if (clk->clk_enable)
			clk->clk_enable(clk);
	}
}

void davinci_clk_disable(struct clk *clk)
{
	if (WARN_ON(clk->usecount == 0))
		return;
	if (--clk->usecount == 0) {
		if (!(clk->flags & CLK_PLL) && (clk->flags & CLK_PSC))
			davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
					   false, clk->flags);
		else if (clk->clk_disable)
			clk->clk_disable(clk);
	}
	if (clk->parent)
		davinci_clk_disable(clk->parent);
}

int davinci_clk_reset(struct clk *clk, bool reset)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->flags & CLK_PSC)
		davinci_psc_reset(clk->gpsc, clk->lpsc, reset);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(davinci_clk_reset);

int davinci_clk_reset_assert(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk) || !clk->reset)
		return -EINVAL;

	return clk->reset(clk, true);
}
EXPORT_SYMBOL(davinci_clk_reset_assert);

int davinci_clk_reset_deassert(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk) || !clk->reset)
		return -EINVAL;

	return clk->reset(clk, false);
}
EXPORT_SYMBOL(davinci_clk_reset_deassert);

int clk_enable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return 0;
	else if (IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	davinci_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	davinci_clk_disable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return 0;

	return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk == NULL || IS_ERR(clk))
		return 0;

	if (clk->round_rate)
		return clk->round_rate(clk, rate);

	return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);

/* Propagate rate to children */
static void propagate_rate(struct clk *root)
{
	struct clk *clk;

	list_for_each_entry(clk, &root->children, childnode) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
}

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!clk)
		return 0;
	else if (IS_ERR(clk))
		return -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (ret == 0) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;

	if (!clk)
		return 0;
	else if (IS_ERR(clk))
		return -EINVAL;

	/* Cannot change parent on enabled clock */
	if (WARN_ON(clk->usecount))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	if (clk->set_parent) {
		int ret = clk->set_parent(clk, parent);

		if (ret) {
			mutex_unlock(&clocks_mutex);
			return ret;
		}
	}
	clk->parent = parent;
	list_del_init(&clk->childnode);
	list_add(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->recalc)
		clk->rate = clk->recalc(clk);
	propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	if (!clk)
		return NULL;

	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (WARN(clk->parent && !clk->parent->rate,
			"CLK: %s parent %s has no rate!\n",
			clk->name, clk->parent->name))
		return -EINVAL;

	INIT_LIST_HEAD(&clk->children);

	mutex_lock(&clocks_mutex);
	list_add_tail(&clk->node, &clocks);
	if (clk->parent) {
		if (clk->set_parent) {
			int ret = clk->set_parent(clk, clk->parent);

			if (ret) {
				mutex_unlock(&clocks_mutex);
				return ret;
			}
		}
		list_add_tail(&clk->childnode, &clk->parent->children);
	}
	mutex_unlock(&clocks_mutex);

	/* If rate is already set, use it */
	if (clk->rate)
		return 0;

	/* Else, see if there is a way to calculate it */
	if (clk->recalc)
		clk->rate = clk->recalc(clk);

	/* Otherwise, default to parent rate */
	else if (clk->parent)
		clk->rate = clk->parent->rate;

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->node);
	list_del(&clk->childnode);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
int __init davinci_clk_disable_unused(void)
{
	struct clk *ck;

	spin_lock_irq(&clockfw_lock);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->usecount > 0)
			continue;
		if (!(ck->flags & CLK_PSC))
			continue;

		/* ignore if in Disabled or SwRstDisable states */
		if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
			continue;

		pr_debug("Clocks: disable unused %s\n", ck->name);

		davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
				false, ck->flags);
	}
	spin_unlock_irq(&clockfw_lock);

	return 0;
}
#endif

static unsigned long clk_sysclk_recalc(struct clk *clk)
{
	u32 v, plldiv;
	struct pll_data *pll;
	unsigned long rate = clk->rate;

	/* If this is the PLL base clock, no more calculations needed */
	if (clk->pll_data)
		return rate;

	if (WARN_ON(!clk->parent))
		return rate;

	rate = clk->parent->rate;

	/* Otherwise, the parent must be a PLL */
	if (WARN_ON(!clk->parent->pll_data))
		return rate;

	pll = clk->parent->pll_data;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		rate = pll->input_rate;

	if (!clk->div_reg)
		return rate;

	v = __raw_readl(pll->base + clk->div_reg);
	if (v & PLLDIV_EN) {
		plldiv = (v & pll->div_ratio_mask) + 1;
		if (plldiv)
			rate /= plldiv;
	}

	return rate;
}

int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
	unsigned v;
	struct pll_data *pll;
	unsigned long input;
	unsigned ratio = 0;

	/* If this is the PLL base clock, wrong function to call */
	if (clk->pll_data)
		return -EINVAL;

	/* There must be a parent... */
	if (WARN_ON(!clk->parent))
		return -EINVAL;

	/* ... the parent must be a PLL... */
	if (WARN_ON(!clk->parent->pll_data))
		return -EINVAL;

	/* ... and this clock must have a divider. */
	if (WARN_ON(!clk->div_reg))
		return -EINVAL;

	pll = clk->parent->pll_data;

	input = clk->parent->rate;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		input = pll->input_rate;

	if (input > rate) {
		/*
		 * We can afford to provide an output a little higher than
		 * requested only if the maximum rate supported by the
		 * hardware on this sysclk is known.
		 */
		if (clk->maxrate) {
			ratio = DIV_ROUND_CLOSEST(input, rate);
			if (input / ratio > clk->maxrate)
				ratio = 0;
		}

		if (ratio == 0)
			ratio = DIV_ROUND_UP(input, rate);

		ratio--;
	}
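
	/*
	 * Worked example (illustrative numbers): input = 300 MHz and
	 * rate = 100 MHz give ratio = 3; after ratio-- the register field
	 * holds 2 and the hardware divides by 2 + 1 = 3, i.e. 100 MHz out.
	 */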

	if (ratio > pll->div_ratio_mask)
		return -EINVAL;

	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	v = __raw_readl(pll->base + clk->div_reg);
	v &= ~pll->div_ratio_mask;
	v |= ratio | PLLDIV_EN;
	__raw_writel(v, pll->base + clk->div_reg);

	v = __raw_readl(pll->base + PLLCMD);
	v |= PLLCMD_GOSET;
	__raw_writel(v, pll->base + PLLCMD);

	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	return 0;
}
EXPORT_SYMBOL(davinci_set_sysclk_rate);

static unsigned long clk_leafclk_recalc(struct clk *clk)
{
	if (WARN_ON(!clk->parent))
		return clk->rate;

	return clk->parent->rate;
}

int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
{
	clk->rate = rate;
	return 0;
}

static unsigned long clk_pllclk_recalc(struct clk *clk)
{
	u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
	u8 bypass;
	struct pll_data *pll = clk->pll_data;
	unsigned long rate = clk->rate;

	ctrl = __raw_readl(pll->base + PLLCTL);
	rate = pll->input_rate = clk->parent->rate;

	if (ctrl & PLLCTL_PLLEN) {
		bypass = 0;
		mult = __raw_readl(pll->base + PLLM);
		if (cpu_is_davinci_dm365())
			mult = 2 * (mult & PLLM_PLLM_MASK);
		else
			mult = (mult & PLLM_PLLM_MASK) + 1;
	} else
		bypass = 1;

	if (pll->flags & PLL_HAS_PREDIV) {
		prediv = __raw_readl(pll->base + PREDIV);
		if (prediv & PLLDIV_EN)
			prediv = (prediv & pll->div_ratio_mask) + 1;
		else
			prediv = 1;
	}

	/* pre-divider is fixed, but (some?) chips won't report that */
	if (cpu_is_davinci_dm355() && pll->num == 1)
		prediv = 8;

	if (pll->flags & PLL_HAS_POSTDIV) {
		postdiv = __raw_readl(pll->base + POSTDIV);
		if (postdiv & PLLDIV_EN)
			postdiv = (postdiv & pll->div_ratio_mask) + 1;
		else
			postdiv = 1;
	}

	if (!bypass) {
		rate /= prediv;
		rate *= mult;
		rate /= postdiv;
	}

	pr_debug("PLL%d: input = %lu MHz [ ",
		 pll->num, clk->parent->rate / 1000000);
	if (bypass)
		pr_debug("bypass ");
	if (prediv > 1)
		pr_debug("/ %d ", prediv);
	if (mult > 1)
		pr_debug("* %d ", mult);
	if (postdiv > 1)
		pr_debug("/ %d ", postdiv);
	pr_debug("] --> %lu MHz output.\n", rate / 1000000);

	return rate;
}

/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
					unsigned int mult, unsigned int postdiv)
{
	u32 ctrl;
	unsigned int locktime;
	unsigned long flags;

	if (pll->base == NULL)
		return -EINVAL;

	/*
	 *  PLL lock time required per OMAP-L138 datasheet is
	 * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
	 * as 4 and OSCIN cycle as 25 MHz.
	 */
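	/*
	 * e.g. prediv = 3 (illustrative): (2000 * 3) / 4 = 1500 OSCIN
	 * cycles, which at 25 MHz is 60 us, matching (2000 * 3) / 100 below.
	 */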
	if (prediv) {
		locktime = ((2000 * prediv) / 100);
		prediv = (prediv - 1) | PLLDIV_EN;
	} else {
		locktime = PLL_LOCK_TIME;
	}
	if (postdiv)
		postdiv = (postdiv - 1) | PLLDIV_EN;
	if (mult)
		mult = mult - 1;

	/* Protect against simultaneous calls to PLL setting sequence */
	spin_lock_irqsave(&clockfw_lock, flags);

	ctrl = __raw_readl(pll->base + PLLCTL);

	/* Switch the PLL to bypass mode */
	ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(PLL_BYPASS_TIME);

	/* Reset and enable PLL */
	ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
	__raw_writel(ctrl, pll->base + PLLCTL);

	if (pll->flags & PLL_HAS_PREDIV)
		__raw_writel(prediv, pll->base + PREDIV);

	__raw_writel(mult, pll->base + PLLM);

	if (pll->flags & PLL_HAS_POSTDIV)
		__raw_writel(postdiv, pll->base + POSTDIV);

	udelay(PLL_RESET_TIME);

	/* Bring PLL out of reset */
	ctrl |= PLLCTL_PLLRST;
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(locktime);

	/* Remove PLL from bypass mode */
	ctrl |= PLLCTL_PLLEN;
	__raw_writel(ctrl, pll->base + PLLCTL);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);
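
/*
 * Usage sketch (illustrative values, not taken from any board file): a
 * machine setup could re-lock a PLL with, say, prediv = 1, mult = 24 and
 * postdiv = 2, which for a hypothetical 24 MHz OSCIN gives
 * 24 MHz / 1 * 24 / 2 = 288 MHz:
 *
 *	err = davinci_set_pllrate(pll1_clk.pll_data, 1, 24, 2);
 *
 * "pll1_clk" here is a hypothetical struct clk whose pll_data points at
 * the PLL being reprogrammed.
 */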

/**
 * davinci_set_refclk_rate() - Set the reference clock rate
 * @rate:	The new rate.
 *
 * Sets the reference clock rate to a given value. This will most likely
 * result in the entire clock tree getting updated.
 *
 * This is used to support boards which use a reference clock different
 * than that used by default in <soc>.c file. The reference clock rate
 * should be updated early in the boot process; ideally soon after the
 * clock tree has been initialized once with the default reference clock
 * rate (davinci_clk_init()).
 *
 * Returns 0 on success, error otherwise.
 */
int davinci_set_refclk_rate(unsigned long rate)
{
	struct clk *refclk;

	refclk = clk_get(NULL, "ref");
	if (IS_ERR(refclk)) {
		pr_err("%s: failed to get reference clock\n", __func__);
		return PTR_ERR(refclk);
	}

	clk_set_rate(refclk, rate);

	clk_put(refclk);

	return 0;
}
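
/*
 * Usage sketch (illustrative): a board whose reference oscillator differs
 * from the SoC default could call this early in its machine init, soon
 * after the SoC code has set up the clock tree with the default rate,
 * e.g. for a hypothetical 24 MHz crystal:
 *
 *	davinci_set_refclk_rate(24000000);
 */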

int __init davinci_clk_init(struct clk_lookup *clocks)
{
	struct clk_lookup *c;
	struct clk *clk;
	size_t num_clocks = 0;

	for (c = clocks; c->clk; c++) {
		clk = c->clk;

		if (!clk->recalc) {

			/* Check if clock is a PLL */
			if (clk->pll_data)
				clk->recalc = clk_pllclk_recalc;

			/* Else, if it is a PLL-derived clock */
			else if (clk->flags & CLK_PLL)
				clk->recalc = clk_sysclk_recalc;

			/* Otherwise, it is a leaf clock (PSC clock) */
			else if (clk->parent)
				clk->recalc = clk_leafclk_recalc;
		}

		if (clk->pll_data) {
			struct pll_data *pll = clk->pll_data;

			if (!pll->div_ratio_mask)
				pll->div_ratio_mask = PLLDIV_RATIO_MASK;

			if (pll->phys_base && !pll->base) {
				pll->base = ioremap(pll->phys_base, SZ_4K);
				WARN_ON(!pll->base);
			}
		}

		if (clk->recalc)
			clk->rate = clk->recalc(clk);

		if (clk->lpsc)
			clk->flags |= CLK_PSC;

		if (clk->flags & PSC_LRST)
			clk->reset = davinci_clk_reset;

		clk_register(clk);
		num_clocks++;

		/* Turn on clocks that Linux doesn't otherwise manage */
		if (clk->flags & ALWAYS_ENABLED)
			clk_enable(clk);
	}

	clkdev_add_table(clocks, num_clocks);

	return 0;
}
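
/*
 * Illustrative example (not part of this file): a SoC clock file passes a
 * clk_lookup table terminated by an entry with a NULL .clk pointer,
 * typically built with the CLK() helper from the mach-davinci clock
 * headers.  The names and clocks below are made up:
 *
 *	static struct clk_lookup hypothetical_clks[] = {
 *		CLK(NULL, "ref", &ref_clk),
 *		CLK(NULL, "pll1", &pll1_clk),
 *		CLK("serial8250.0", NULL, &uart0_clk),
 *		CLK(NULL, NULL, NULL),
 *	};
 *
 *	davinci_clk_init(hypothetical_clks);
 */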

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define CLKNAME_MAX	10		/* longest clock name */
#define NEST_DELTA	2
#define NEST_MAX	4

static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
	char		*state;
	char		buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
	struct clk	*clk;
	unsigned	i;

	if (parent->flags & CLK_PLL)
		state = "pll";
	else if (parent->flags & CLK_PSC)
		state = "psc";
	else
		state = "";

	/* <nest spaces> name <pad to end> */
	memset(buf, ' ', sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = 0;
	i = strlen(parent->name);
	memcpy(buf + nest, parent->name,
			min(i, (unsigned)(sizeof(buf) - 1 - nest)));

	seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
		   buf, parent->usecount, state, clk_get_rate(parent));
	/* REVISIT show device associations too */

	/* cost is now small, but not linear... */
	list_for_each_entry(clk, &parent->children, childnode) {
		dump_clock(s, nest + NEST_DELTA, clk);
	}
}
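
/*
 * Roughly what the resulting "davinci_clocks" debugfs listing looks like
 * (clock names, use counts and rates below are invented for illustration):
 *
 *	ref               users= 1      24000000 Hz
 *	  pll1            users= 1 pll  600000000 Hz
 *	    pll1_sysclk2  users= 2 psc  300000000 Hz
 */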

static int davinci_ck_show(struct seq_file *m, void *v)
{
	struct clk *clk;

	/*
	 * Show clock tree; We trust nonzero usecounts equate to PSC enables...
	 */
	mutex_lock(&clocks_mutex);
	list_for_each_entry(clk, &clocks, node)
		if (!clk->parent)
			dump_clock(m, 0, clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int davinci_ck_open(struct inode *inode, struct file *file)
{
	return single_open(file, davinci_ck_show, NULL);
}

static const struct file_operations davinci_ck_operations = {
	.open		= davinci_ck_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init davinci_clk_debugfs_init(void)
{
	debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
						&davinci_ck_operations);
	return 0;

}
device_initcall(davinci_clk_debugfs_init);
#endif /* CONFIG_DEBUG_FS */