arch/arm/mach-omap1/clock.c (Linux v3.15)
 
   1/*
   2 *  linux/arch/arm/mach-omap1/clock.c
   3 *
   4 *  Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
   5 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
   6 *
   7 *  Modified to use omap shared clock framework by
   8 *  Tony Lindgren <tony@atomide.com>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 */
  14#include <linux/kernel.h>
  15#include <linux/export.h>
  16#include <linux/list.h>
  17#include <linux/errno.h>
  18#include <linux/err.h>
  19#include <linux/io.h>
  20#include <linux/clk.h>
  21#include <linux/clkdev.h>
  22
  23#include <asm/mach-types.h>
  24
  25#include <mach/hardware.h>
  26
  27#include "soc.h"
  28#include "iomap.h"
  29#include "clock.h"
  30#include "opp.h"
  31#include "sram.h"
  32
  33__u32 arm_idlect1_mask;
  34struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
  35
  36static LIST_HEAD(clocks);
  37static DEFINE_MUTEX(clocks_mutex);
  38static DEFINE_SPINLOCK(clockfw_lock);
  39
  40/*
  41 * Omap1 specific clock functions
  42 */
  43
  44unsigned long omap1_uart_recalc(struct clk *clk)
  45{
  46	unsigned int val = __raw_readl(clk->enable_reg);
  47	return val & clk->enable_bit ? 48000000 : 12000000;
  48}
  49
  50unsigned long omap1_sossi_recalc(struct clk *clk)
  51{
  52	u32 div = omap_readl(MOD_CONF_CTRL_1);
  53
  54	div = (div >> 17) & 0x7;
  55	div++;
  56
  57	return clk->parent->rate / div;
  58}
  59
  60static void omap1_clk_allow_idle(struct clk *clk)
  61{
  62	struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
  63
  64	if (!(clk->flags & CLOCK_IDLE_CONTROL))
  65		return;
  66
  67	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
  68		arm_idlect1_mask |= 1 << iclk->idlect_shift;
  69}
  70
  71static void omap1_clk_deny_idle(struct clk *clk)
  72{
  73	struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
  74
  75	if (!(clk->flags & CLOCK_IDLE_CONTROL))
  76		return;
  77
  78	if (iclk->no_idle_count++ == 0)
  79		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
  80}
  81
  82static __u16 verify_ckctl_value(__u16 newval)
  83{
  84	/* This function checks for following limitations set
  85	 * by the hardware (all conditions must be true):
  86	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
  87	 * ARM_CK >= TC_CK
  88	 * DSP_CK >= TC_CK
  89	 * DSPMMU_CK >= TC_CK
  90	 *
  91	 * In addition following rules are enforced:
  92	 * LCD_CK <= TC_CK
  93	 * ARMPER_CK <= TC_CK
  94	 *
  95	 * However, maximum frequencies are not checked for!
  96	 */
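	/* Illustrative worked example (added for clarity, not in the original
	 * file): with arm_exp = 1 (ARM_CK = input/2) and a requested tc_exp
	 * of 0 (TC_CK = input/1), TC_CK would exceed ARM_CK, so the checks
	 * below raise tc_exp to 1; lcd_exp and per_exp are then raised to at
	 * least tc_exp so that LCD_CK and ARMPER_CK never exceed TC_CK.
	 */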
  97	__u8 per_exp;
  98	__u8 lcd_exp;
  99	__u8 arm_exp;
 100	__u8 dsp_exp;
 101	__u8 tc_exp;
 102	__u8 dspmmu_exp;
 103
 104	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
 105	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
 106	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
 107	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
 108	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
 109	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;
 110
 111	if (dspmmu_exp < dsp_exp)
 112		dspmmu_exp = dsp_exp;
 113	if (dspmmu_exp > dsp_exp+1)
 114		dspmmu_exp = dsp_exp+1;
 115	if (tc_exp < arm_exp)
 116		tc_exp = arm_exp;
 117	if (tc_exp < dspmmu_exp)
 118		tc_exp = dspmmu_exp;
 119	if (tc_exp > lcd_exp)
 120		lcd_exp = tc_exp;
 121	if (tc_exp > per_exp)
 122		per_exp = tc_exp;
 123
 124	newval &= 0xf000;
 125	newval |= per_exp << CKCTL_PERDIV_OFFSET;
 126	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
 127	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
 128	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
 129	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
 130	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;
 131
 132	return newval;
 133}
 134
 135static int calc_dsor_exp(struct clk *clk, unsigned long rate)
 136{
 137	/* Note: If target frequency is too low, this function will return 4,
 138	 * which is invalid value. Caller must check for this value and act
 139	 * accordingly.
 140	 *
 141	 * Note: This function does not check for following limitations set
 142	 * by the hardware (all conditions must be true):
 143	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
 144	 * ARM_CK >= TC_CK
 145	 * DSP_CK >= TC_CK
 146	 * DSPMMU_CK >= TC_CK
 147	 */
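	/* Illustrative example: with a 192 MHz parent and a 60 MHz target,
	 * the loop below halves 192 -> 96 -> 48 and stops at dsor_exp = 2,
	 * i.e. divide-by-4, the closest rate not above the target.
	 */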
 148	unsigned long realrate;
 149	struct clk * parent;
 150	unsigned  dsor_exp;
 151
 152	parent = clk->parent;
 153	if (unlikely(parent == NULL))
 154		return -EIO;
 155
 156	realrate = parent->rate;
 157	for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
 158		if (realrate <= rate)
 159			break;
 160
 161		realrate /= 2;
 162	}
 163
 164	return dsor_exp;
 165}
 166
 167unsigned long omap1_ckctl_recalc(struct clk *clk)
 168{
 169	/* Calculate divisor encoded as 2-bit exponent */
 170	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
 171
 172	return clk->parent->rate / dsor;
 173}
 174
 175unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
 176{
 177	int dsor;
 178
 179	/* Calculate divisor encoded as 2-bit exponent
 180	 *
 181	 * The clock control bits are in DSP domain,
 182	 * so api_ck is needed for access.
 183	 * Note that DSP_CKCTL virt addr = phys addr, so
 184	 * we must use __raw_readw() instead of omap_readw().
 185	 */
 186	omap1_clk_enable(api_ck_p);
 187	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
 188	omap1_clk_disable(api_ck_p);
 189
 190	return clk->parent->rate / dsor;
 191}
 192
 193/* MPU virtual clock functions */
 194int omap1_select_table_rate(struct clk *clk, unsigned long rate)
 195{
 196	/* Find the highest supported frequency <= rate and switch to it */
 197	struct mpu_rate * ptr;
 198	unsigned long ref_rate;
 199
 200	ref_rate = ck_ref_p->rate;
 201
 202	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
 203		if (!(ptr->flags & cpu_mask))
 204			continue;
 205
 206		if (ptr->xtal != ref_rate)
 207			continue;
 208
 209		/* Can check only after xtal frequency check */
 210		if (ptr->rate <= rate)
 211			break;
 212	}
 213
 214	if (!ptr->rate)
 215		return -EINVAL;
 216
 217	/*
 218	 * In most cases we should not need to reprogram DPLL.
 219	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
 220	 */
 221	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
 222
 223	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
 224	ck_dpll1_p->rate = ptr->pll_rate;
 225
 226	return 0;
 227}
 228
 229int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
 230{
 231	int dsor_exp;
 232	u16 regval;
 233
 234	dsor_exp = calc_dsor_exp(clk, rate);
 235	if (dsor_exp > 3)
 236		dsor_exp = -EINVAL;
 237	if (dsor_exp < 0)
 238		return dsor_exp;
 239
 240	regval = __raw_readw(DSP_CKCTL);
 241	regval &= ~(3 << clk->rate_offset);
 242	regval |= dsor_exp << clk->rate_offset;
 243	__raw_writew(regval, DSP_CKCTL);
 244	clk->rate = clk->parent->rate / (1 << dsor_exp);
 245
 246	return 0;
 247}
 248
 249long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
 250{
 251	int dsor_exp = calc_dsor_exp(clk, rate);
 252	if (dsor_exp < 0)
 253		return dsor_exp;
 254	if (dsor_exp > 3)
 255		dsor_exp = 3;
 256	return clk->parent->rate / (1 << dsor_exp);
 257}
 258
 259int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
 260{
 261	int dsor_exp;
 262	u16 regval;
 263
 264	dsor_exp = calc_dsor_exp(clk, rate);
 265	if (dsor_exp > 3)
 266		dsor_exp = -EINVAL;
 267	if (dsor_exp < 0)
 268		return dsor_exp;
 269
 270	regval = omap_readw(ARM_CKCTL);
 271	regval &= ~(3 << clk->rate_offset);
 272	regval |= dsor_exp << clk->rate_offset;
 273	regval = verify_ckctl_value(regval);
 274	omap_writew(regval, ARM_CKCTL);
 275	clk->rate = clk->parent->rate / (1 << dsor_exp);
 276	return 0;
 277}
 278
 279long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
 280{
 281	/* Find the highest supported frequency <= rate */
 282	struct mpu_rate * ptr;
 283	long highest_rate;
 284	unsigned long ref_rate;
 285
 286	ref_rate = ck_ref_p->rate;
 287
 288	highest_rate = -EINVAL;
 289
 290	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
 291		if (!(ptr->flags & cpu_mask))
 292			continue;
 293
 294		if (ptr->xtal != ref_rate)
 295			continue;
 296
 297		highest_rate = ptr->rate;
 298
 299		/* Can check only after xtal frequency check */
 300		if (ptr->rate <= rate)
 301			break;
 302	}
 303
 304	return highest_rate;
 305}
 306
 307static unsigned calc_ext_dsor(unsigned long rate)
 308{
 309	unsigned dsor;
 310
 311	/* MCLK and BCLK divisor selection is not linear:
 312	 * freq = 96MHz / dsor
 313	 *
 314	 * RATIO_SEL range: dsor <-> RATIO_SEL
 315	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
 316	 * 6..48:  (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
 317	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
 318	 * can not be used.
 319	 */
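	/* Illustrative example: a 24 MHz request yields dsor = 4 (96/4 = 24),
	 * encoded by the caller as RATIO_SEL = dsor - 2 = 2; a 3 MHz request
	 * yields dsor = 32, encoded as RATIO_SEL = (32 - 8) / 2 + 6 = 18.
	 */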
 320	for (dsor = 2; dsor < 96; ++dsor) {
 321		if ((dsor & 1) && dsor > 8)
 322			continue;
 323		if (rate >= 96000000 / dsor)
 324			break;
 325	}
 326	return dsor;
 327}
 328
 329/* XXX Only needed on 1510 */
 330int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
 331{
 332	unsigned int val;
 333
 334	val = __raw_readl(clk->enable_reg);
 335	if (rate == 12000000)
 336		val &= ~(1 << clk->enable_bit);
 337	else if (rate == 48000000)
 338		val |= (1 << clk->enable_bit);
 339	else
 340		return -EINVAL;
 341	__raw_writel(val, clk->enable_reg);
 342	clk->rate = rate;
 343
 344	return 0;
 345}
 346
 347/* External clock (MCLK & BCLK) functions */
 348int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
 349{
 350	unsigned dsor;
 351	__u16 ratio_bits;
 352
 353	dsor = calc_ext_dsor(rate);
 354	clk->rate = 96000000 / dsor;
 355	if (dsor > 8)
 356		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
 357	else
 358		ratio_bits = (dsor - 2) << 2;
 359
 360	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
 361	__raw_writew(ratio_bits, clk->enable_reg);
 362
 363	return 0;
 364}
 365
 366int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
 367{
 368	u32 l;
 369	int div;
 370	unsigned long p_rate;
 371
 372	p_rate = clk->parent->rate;
 373	/* Round towards slower frequency */
 374	div = (p_rate + rate - 1) / rate;
 375	div--;
 376	if (div < 0 || div > 7)
 377		return -EINVAL;
 378
 379	l = omap_readl(MOD_CONF_CTRL_1);
 380	l &= ~(7 << 17);
 381	l |= div << 17;
 382	omap_writel(l, MOD_CONF_CTRL_1);
 383
 384	clk->rate = p_rate / (div + 1);
 385
 386	return 0;
 387}
 388
 389long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
 390{
 391	return 96000000 / calc_ext_dsor(rate);
 392}
 393
 394void omap1_init_ext_clk(struct clk *clk)
 395{
 396	unsigned dsor;
 397	__u16 ratio_bits;
 398
 399	/* Determine current rate and ensure clock is based on 96MHz APLL */
 400	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
 401	__raw_writew(ratio_bits, clk->enable_reg);
 402
 403	ratio_bits = (ratio_bits & 0xfc) >> 2;
 404	if (ratio_bits > 6)
 405		dsor = (ratio_bits - 6) * 2 + 8;
 406	else
 407		dsor = ratio_bits + 2;
 408
  409	clk->rate = 96000000 / dsor;
 410}
 411
 412int omap1_clk_enable(struct clk *clk)
 413{
 414	int ret = 0;
 415
 416	if (clk->usecount++ == 0) {
 417		if (clk->parent) {
 418			ret = omap1_clk_enable(clk->parent);
 419			if (ret)
 420				goto err;
 421
 422			if (clk->flags & CLOCK_NO_IDLE_PARENT)
 423				omap1_clk_deny_idle(clk->parent);
 424		}
 425
 426		ret = clk->ops->enable(clk);
 427		if (ret) {
 428			if (clk->parent)
 429				omap1_clk_disable(clk->parent);
 430			goto err;
 431		}
 432	}
 433	return ret;
 434
 435err:
 436	clk->usecount--;
 437	return ret;
 438}
 439
 440void omap1_clk_disable(struct clk *clk)
 441{
 442	if (clk->usecount > 0 && !(--clk->usecount)) {
 443		clk->ops->disable(clk);
 444		if (likely(clk->parent)) {
 445			omap1_clk_disable(clk->parent);
 446			if (clk->flags & CLOCK_NO_IDLE_PARENT)
 447				omap1_clk_allow_idle(clk->parent);
 448		}
 449	}
 450}
 451
 452static int omap1_clk_enable_generic(struct clk *clk)
 453{
 454	__u16 regval16;
 455	__u32 regval32;
 456
 457	if (unlikely(clk->enable_reg == NULL)) {
 458		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
 459		       clk->name);
 460		return -EINVAL;
 461	}
 462
 463	if (clk->flags & ENABLE_REG_32BIT) {
 464		regval32 = __raw_readl(clk->enable_reg);
 465		regval32 |= (1 << clk->enable_bit);
 466		__raw_writel(regval32, clk->enable_reg);
 467	} else {
 468		regval16 = __raw_readw(clk->enable_reg);
 469		regval16 |= (1 << clk->enable_bit);
 470		__raw_writew(regval16, clk->enable_reg);
 471	}
 472
 473	return 0;
 474}
 475
 476static void omap1_clk_disable_generic(struct clk *clk)
 477{
 478	__u16 regval16;
 479	__u32 regval32;
 480
 481	if (clk->enable_reg == NULL)
 482		return;
 483
 484	if (clk->flags & ENABLE_REG_32BIT) {
 485		regval32 = __raw_readl(clk->enable_reg);
 486		regval32 &= ~(1 << clk->enable_bit);
 487		__raw_writel(regval32, clk->enable_reg);
 488	} else {
 489		regval16 = __raw_readw(clk->enable_reg);
 490		regval16 &= ~(1 << clk->enable_bit);
 491		__raw_writew(regval16, clk->enable_reg);
 492	}
 493}
 494
 495const struct clkops clkops_generic = {
 496	.enable		= omap1_clk_enable_generic,
 497	.disable	= omap1_clk_disable_generic,
 498};
 499
 500static int omap1_clk_enable_dsp_domain(struct clk *clk)
 501{
 502	int retval;
 503
 504	retval = omap1_clk_enable(api_ck_p);
 505	if (!retval) {
 506		retval = omap1_clk_enable_generic(clk);
 507		omap1_clk_disable(api_ck_p);
 508	}
 509
 510	return retval;
 511}
 512
 513static void omap1_clk_disable_dsp_domain(struct clk *clk)
 514{
 515	if (omap1_clk_enable(api_ck_p) == 0) {
 516		omap1_clk_disable_generic(clk);
 517		omap1_clk_disable(api_ck_p);
 518	}
 519}
 520
 521const struct clkops clkops_dspck = {
 522	.enable		= omap1_clk_enable_dsp_domain,
 523	.disable	= omap1_clk_disable_dsp_domain,
 524};
 525
 526/* XXX SYSC register handling does not belong in the clock framework */
 527static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
 528{
 529	int ret;
 530	struct uart_clk *uclk;
 531
 532	ret = omap1_clk_enable_generic(clk);
 533	if (ret == 0) {
 534		/* Set smart idle acknowledgement mode */
 535		uclk = (struct uart_clk *)clk;
 536		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
 537			    uclk->sysc_addr);
 538	}
 539
 540	return ret;
 541}
 542
 543/* XXX SYSC register handling does not belong in the clock framework */
 544static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
 545{
 546	struct uart_clk *uclk;
 547
 548	/* Set force idle acknowledgement mode */
 549	uclk = (struct uart_clk *)clk;
 550	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
 551
 552	omap1_clk_disable_generic(clk);
 553}
 554
 555/* XXX SYSC register handling does not belong in the clock framework */
 556const struct clkops clkops_uart_16xx = {
 557	.enable		= omap1_clk_enable_uart_functional_16xx,
 558	.disable	= omap1_clk_disable_uart_functional_16xx,
 559};
 560
 561long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
 562{
 563	if (clk->round_rate != NULL)
 564		return clk->round_rate(clk, rate);
 565
 566	return clk->rate;
 567}
 568
 569int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
 570{
 571	int  ret = -EINVAL;
 572
 573	if (clk->set_rate)
 574		ret = clk->set_rate(clk, rate);
 575	return ret;
 576}
 577
 578/*
 579 * Omap1 clock reset and init functions
 580 */
 581
 582#ifdef CONFIG_OMAP_RESET_CLOCKS
 583
 584void omap1_clk_disable_unused(struct clk *clk)
 585{
 586	__u32 regval32;
 587
 588	/* Clocks in the DSP domain need api_ck. Just assume bootloader
 589	 * has not enabled any DSP clocks */
 590	if (clk->enable_reg == DSP_IDLECT2) {
 591		pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
 592			clk->name);
 593		return;
 594	}
 595
 596	/* Is the clock already disabled? */
 597	if (clk->flags & ENABLE_REG_32BIT)
 598		regval32 = __raw_readl(clk->enable_reg);
 599	else
 600		regval32 = __raw_readw(clk->enable_reg);
 601
 602	if ((regval32 & (1 << clk->enable_bit)) == 0)
 603		return;
 604
 605	printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
 606	clk->ops->disable(clk);
 607	printk(" done\n");
 608}
 609
 610#endif
 611
 612
 613int clk_enable(struct clk *clk)
 614{
 615	unsigned long flags;
 616	int ret;
 617
 618	if (clk == NULL || IS_ERR(clk))
 619		return -EINVAL;
 620
 621	spin_lock_irqsave(&clockfw_lock, flags);
 622	ret = omap1_clk_enable(clk);
 623	spin_unlock_irqrestore(&clockfw_lock, flags);
 624
 625	return ret;
 626}
 627EXPORT_SYMBOL(clk_enable);
 628
 629void clk_disable(struct clk *clk)
 630{
 631	unsigned long flags;
 632
 633	if (clk == NULL || IS_ERR(clk))
 634		return;
 635
 636	spin_lock_irqsave(&clockfw_lock, flags);
 637	if (clk->usecount == 0) {
 638		pr_err("Trying to disable clock %s with 0 usecount\n",
 639		       clk->name);
 640		WARN_ON(1);
 641		goto out;
 642	}
 643
 644	omap1_clk_disable(clk);
 645
 646out:
 647	spin_unlock_irqrestore(&clockfw_lock, flags);
 648}
 649EXPORT_SYMBOL(clk_disable);
 650
 651unsigned long clk_get_rate(struct clk *clk)
 652{
 653	unsigned long flags;
 654	unsigned long ret;
 655
 656	if (clk == NULL || IS_ERR(clk))
 657		return 0;
 658
 659	spin_lock_irqsave(&clockfw_lock, flags);
 660	ret = clk->rate;
 661	spin_unlock_irqrestore(&clockfw_lock, flags);
 662
 663	return ret;
 664}
 665EXPORT_SYMBOL(clk_get_rate);
 666
 667/*
 668 * Optional clock functions defined in include/linux/clk.h
 669 */
 670
 671long clk_round_rate(struct clk *clk, unsigned long rate)
 672{
 673	unsigned long flags;
 674	long ret;
 675
 676	if (clk == NULL || IS_ERR(clk))
 677		return 0;
 678
 679	spin_lock_irqsave(&clockfw_lock, flags);
 680	ret = omap1_clk_round_rate(clk, rate);
 681	spin_unlock_irqrestore(&clockfw_lock, flags);
 682
 683	return ret;
 684}
 685EXPORT_SYMBOL(clk_round_rate);
 686
 687int clk_set_rate(struct clk *clk, unsigned long rate)
 688{
 689	unsigned long flags;
 690	int ret = -EINVAL;
 691
 692	if (clk == NULL || IS_ERR(clk))
 693		return ret;
 694
 695	spin_lock_irqsave(&clockfw_lock, flags);
 696	ret = omap1_clk_set_rate(clk, rate);
 697	if (ret == 0)
 698		propagate_rate(clk);
 699	spin_unlock_irqrestore(&clockfw_lock, flags);
 700
 701	return ret;
 702}
 703EXPORT_SYMBOL(clk_set_rate);
 704
 705int clk_set_parent(struct clk *clk, struct clk *parent)
 706{
 707	WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");
 708
 709	return -EINVAL;
 710}
 711EXPORT_SYMBOL(clk_set_parent);
 712
 713struct clk *clk_get_parent(struct clk *clk)
 714{
 715	return clk->parent;
 716}
 717EXPORT_SYMBOL(clk_get_parent);
 718
 719/*
 720 * OMAP specific clock functions shared between omap1 and omap2
 721 */
 722
 723int __initdata mpurate;
 724
 725/*
 726 * By default we use the rate set by the bootloader.
 727 * You can override this with mpurate= cmdline option.
 728 */
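/* Illustrative example: booting with "mpurate=216" is treated as 216 MHz
 * (values below 1000 are scaled by 1000000 below), while
 * "mpurate=216000000" is taken literally.
 */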
 729static int __init omap_clk_setup(char *str)
 730{
 731	get_option(&str, &mpurate);
 732
 733	if (!mpurate)
 734		return 1;
 735
 736	if (mpurate < 1000)
 737		mpurate *= 1000000;
 738
 739	return 1;
 740}
 741__setup("mpurate=", omap_clk_setup);
 742
 743/* Used for clocks that always have same value as the parent clock */
 744unsigned long followparent_recalc(struct clk *clk)
 745{
 746	return clk->parent->rate;
 747}
 748
 749/*
 750 * Used for clocks that have the same value as the parent clock,
 751 * divided by some factor
 752 */
 753unsigned long omap_fixed_divisor_recalc(struct clk *clk)
 754{
 755	WARN_ON(!clk->fixed_div);
 756
 757	return clk->parent->rate / clk->fixed_div;
 758}
 759
 760void clk_reparent(struct clk *child, struct clk *parent)
 761{
 762	list_del_init(&child->sibling);
 763	if (parent)
 764		list_add(&child->sibling, &parent->children);
 765	child->parent = parent;
 766
 767	/* now do the debugfs renaming to reattach the child
 768	   to the proper parent */
 769}
 770
 771/* Propagate rate to children */
 772void propagate_rate(struct clk *tclk)
 773{
 774	struct clk *clkp;
 775
 776	list_for_each_entry(clkp, &tclk->children, sibling) {
 777		if (clkp->recalc)
 778			clkp->rate = clkp->recalc(clkp);
 779		propagate_rate(clkp);
 780	}
 781}
 782
 783static LIST_HEAD(root_clks);
 784
 785/**
 786 * recalculate_root_clocks - recalculate and propagate all root clocks
 787 *
 788 * Recalculates all root clocks (clocks with no parent), which if the
 789 * clock's .recalc is set correctly, should also propagate their rates.
 790 * Called at init.
 791 */
 792void recalculate_root_clocks(void)
 793{
 794	struct clk *clkp;
 795
 796	list_for_each_entry(clkp, &root_clks, sibling) {
 797		if (clkp->recalc)
 798			clkp->rate = clkp->recalc(clkp);
 799		propagate_rate(clkp);
 800	}
 801}
 802
 803/**
 804 * clk_preinit - initialize any fields in the struct clk before clk init
 805 * @clk: struct clk * to initialize
 806 *
 807 * Initialize any struct clk fields needed before normal clk initialization
 808 * can run.  No return value.
 809 */
 810void clk_preinit(struct clk *clk)
 811{
 812	INIT_LIST_HEAD(&clk->children);
 813}
 814
 815int clk_register(struct clk *clk)
 816{
 817	if (clk == NULL || IS_ERR(clk))
 818		return -EINVAL;
 819
 820	/*
 821	 * trap out already registered clocks
 822	 */
 823	if (clk->node.next || clk->node.prev)
 824		return 0;
 825
 826	mutex_lock(&clocks_mutex);
 827	if (clk->parent)
 828		list_add(&clk->sibling, &clk->parent->children);
 829	else
 830		list_add(&clk->sibling, &root_clks);
 831
 832	list_add(&clk->node, &clocks);
 833	if (clk->init)
 834		clk->init(clk);
 835	mutex_unlock(&clocks_mutex);
 836
 837	return 0;
 838}
 839EXPORT_SYMBOL(clk_register);
 840
 841void clk_unregister(struct clk *clk)
 842{
 843	if (clk == NULL || IS_ERR(clk))
 844		return;
 845
 846	mutex_lock(&clocks_mutex);
 847	list_del(&clk->sibling);
 848	list_del(&clk->node);
 849	mutex_unlock(&clocks_mutex);
 850}
 851EXPORT_SYMBOL(clk_unregister);
 852
 853void clk_enable_init_clocks(void)
 854{
 855	struct clk *clkp;
 856
 857	list_for_each_entry(clkp, &clocks, node)
 858		if (clkp->flags & ENABLE_ON_INIT)
 859			clk_enable(clkp);
 860}
 861
 862/**
 863 * omap_clk_get_by_name - locate OMAP struct clk by its name
 864 * @name: name of the struct clk to locate
 865 *
 866 * Locate an OMAP struct clk by its name.  Assumes that struct clk
 867 * names are unique.  Returns NULL if not found or a pointer to the
 868 * struct clk if found.
 869 */
 870struct clk *omap_clk_get_by_name(const char *name)
 871{
 872	struct clk *c;
 873	struct clk *ret = NULL;
 874
 875	mutex_lock(&clocks_mutex);
 876
 877	list_for_each_entry(c, &clocks, node) {
 878		if (!strcmp(c->name, name)) {
 879			ret = c;
 880			break;
 881		}
 882	}
 883
 884	mutex_unlock(&clocks_mutex);
 885
 886	return ret;
 887}
 888
 889int omap_clk_enable_autoidle_all(void)
 890{
 891	struct clk *c;
 892	unsigned long flags;
 893
 894	spin_lock_irqsave(&clockfw_lock, flags);
 895
 896	list_for_each_entry(c, &clocks, node)
 897		if (c->ops->allow_idle)
 898			c->ops->allow_idle(c);
 899
 900	spin_unlock_irqrestore(&clockfw_lock, flags);
 901
 902	return 0;
 903}
 904
 905int omap_clk_disable_autoidle_all(void)
 906{
 907	struct clk *c;
 908	unsigned long flags;
 909
 910	spin_lock_irqsave(&clockfw_lock, flags);
 911
 912	list_for_each_entry(c, &clocks, node)
 913		if (c->ops->deny_idle)
 914			c->ops->deny_idle(c);
 915
 916	spin_unlock_irqrestore(&clockfw_lock, flags);
 917
 918	return 0;
 919}
 920
 921/*
 922 * Low level helpers
 923 */
 924static int clkll_enable_null(struct clk *clk)
 925{
 926	return 0;
 927}
 928
 929static void clkll_disable_null(struct clk *clk)
 930{
 931}
 932
 933const struct clkops clkops_null = {
 934	.enable		= clkll_enable_null,
 935	.disable	= clkll_disable_null,
 936};
 937
 938/*
 939 * Dummy clock
 940 *
 941 * Used for clock aliases that are needed on some OMAPs, but not others
 942 */
 943struct clk dummy_ck = {
 944	.name	= "dummy",
 945	.ops	= &clkops_null,
 946};
 947
 948/*
 949 *
 950 */
 951
 952#ifdef CONFIG_OMAP_RESET_CLOCKS
 953/*
 954 * Disable any unused clocks left on by the bootloader
 955 */
 956static int __init clk_disable_unused(void)
 957{
 958	struct clk *ck;
 959	unsigned long flags;
 960
 961	pr_info("clock: disabling unused clocks to save power\n");
 962
 963	spin_lock_irqsave(&clockfw_lock, flags);
 964	list_for_each_entry(ck, &clocks, node) {
 965		if (ck->ops == &clkops_null)
 966			continue;
 967
 968		if (ck->usecount > 0 || !ck->enable_reg)
 969			continue;
 970
 971		omap1_clk_disable_unused(ck);
 972	}
 973	spin_unlock_irqrestore(&clockfw_lock, flags);
 974
 975	return 0;
 976}
 977late_initcall(clk_disable_unused);
 978late_initcall(omap_clk_enable_autoidle_all);
 979#endif
 980
 981#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
 982/*
 983 *	debugfs support to trace clock tree hierarchy and attributes
 984 */
 985
 986#include <linux/debugfs.h>
 987#include <linux/seq_file.h>
 988
 989static struct dentry *clk_debugfs_root;
 990
 991static int clk_dbg_show_summary(struct seq_file *s, void *unused)
 992{
 993	struct clk *c;
 994	struct clk *pa;
 995
 996	mutex_lock(&clocks_mutex);
 997	seq_printf(s, "%-30s %-30s %-10s %s\n",
 998		   "clock-name", "parent-name", "rate", "use-count");
 999
1000	list_for_each_entry(c, &clocks, node) {
1001		pa = c->parent;
1002		seq_printf(s, "%-30s %-30s %-10lu %d\n",
1003			   c->name, pa ? pa->name : "none", c->rate,
1004			   c->usecount);
1005	}
1006	mutex_unlock(&clocks_mutex);
1007
1008	return 0;
1009}
1010
1011static int clk_dbg_open(struct inode *inode, struct file *file)
1012{
1013	return single_open(file, clk_dbg_show_summary, inode->i_private);
1014}
1015
1016static const struct file_operations debug_clock_fops = {
1017	.open           = clk_dbg_open,
1018	.read           = seq_read,
1019	.llseek         = seq_lseek,
1020	.release        = single_release,
1021};
1022
1023static int clk_debugfs_register_one(struct clk *c)
1024{
1025	int err;
1026	struct dentry *d;
1027	struct clk *pa = c->parent;
1028
1029	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
1030	if (!d)
1031		return -ENOMEM;
1032	c->dent = d;
1033
1034	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
1035	if (!d) {
1036		err = -ENOMEM;
1037		goto err_out;
1038	}
1039	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
1040	if (!d) {
1041		err = -ENOMEM;
1042		goto err_out;
1043	}
1044	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
1045	if (!d) {
1046		err = -ENOMEM;
1047		goto err_out;
1048	}
1049	return 0;
1050
1051err_out:
1052	debugfs_remove_recursive(c->dent);
1053	return err;
1054}
1055
1056static int clk_debugfs_register(struct clk *c)
1057{
1058	int err;
1059	struct clk *pa = c->parent;
1060
1061	if (pa && !pa->dent) {
1062		err = clk_debugfs_register(pa);
1063		if (err)
1064			return err;
1065	}
1066
1067	if (!c->dent) {
1068		err = clk_debugfs_register_one(c);
1069		if (err)
1070			return err;
1071	}
1072	return 0;
1073}
1074
1075static int __init clk_debugfs_init(void)
1076{
1077	struct clk *c;
1078	struct dentry *d;
1079	int err;
1080
1081	d = debugfs_create_dir("clock", NULL);
1082	if (!d)
1083		return -ENOMEM;
1084	clk_debugfs_root = d;
1085
1086	list_for_each_entry(c, &clocks, node) {
1087		err = clk_debugfs_register(c);
1088		if (err)
1089			goto err_out;
1090	}
1091
1092	d = debugfs_create_file("summary", S_IRUGO,
1093		d, NULL, &debug_clock_fops);
1094	if (!d)
1095		return -ENOMEM;
1096
1097	return 0;
1098err_out:
1099	debugfs_remove_recursive(clk_debugfs_root);
1100	return err;
1101}
1102late_initcall(clk_debugfs_init);
1103
1104#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
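
The v3.15 listing above implements the generic clock API entry points (clk_enable(), clk_disable(), clk_round_rate(), clk_set_rate(), clk_get_rate()) directly in this file and exports them with EXPORT_SYMBOL(). A minimal, hypothetical consumer sketch follows, showing which of those entry points a driver ends up in; the clock name "uart1_ck" and the 48 MHz target rate are assumptions chosen only for illustration.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_consumer_probe(struct device *dev)
{
	struct clk *ck;
	long rounded;
	int ret;

	ck = clk_get(dev, "uart1_ck");		/* resolved through clkdev */
	if (IS_ERR(ck))
		return PTR_ERR(ck);

	ret = clk_enable(ck);			/* ends up in omap1_clk_enable() */
	if (ret) {
		clk_put(ck);
		return ret;
	}

	rounded = clk_round_rate(ck, 48000000);	/* omap1_clk_round_rate() */
	if (rounded > 0)
		clk_set_rate(ck, rounded);	/* omap1_clk_set_rate(), then propagate_rate() */

	dev_info(dev, "clock running at %lu Hz\n", clk_get_rate(ck));

	clk_disable(ck);
	clk_put(ck);
	return 0;
}
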
arch/arm/mach-omap1/clock.c (Linux v6.8)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  linux/arch/arm/mach-omap1/clock.c
  4 *
  5 *  Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
  6 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
  7 *
  8 *  Modified to use omap shared clock framework by
  9 *  Tony Lindgren <tony@atomide.com>
 10 */
 11#include <linux/kernel.h>
 12#include <linux/export.h>
 13#include <linux/list.h>
 14#include <linux/errno.h>
 15#include <linux/err.h>
 16#include <linux/io.h>
 17#include <linux/clk.h>
 18#include <linux/clkdev.h>
 19#include <linux/clk-provider.h>
 20#include <linux/soc/ti/omap1-io.h>
 21#include <linux/spinlock.h>
 22
 23#include <asm/mach-types.h>
 24
 25#include "hardware.h"
 26#include "soc.h"
 27#include "iomap.h"
 28#include "clock.h"
 29#include "opp.h"
 30#include "sram.h"
 31
 32__u32 arm_idlect1_mask;
 33/* provide direct internal access (not via clk API) to some clocks */
 34struct omap1_clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
 35
 36/* protect registers shared among clk_enable/disable() and clk_set_rate() operations */
 37static DEFINE_SPINLOCK(arm_ckctl_lock);
 38static DEFINE_SPINLOCK(arm_idlect2_lock);
 39static DEFINE_SPINLOCK(mod_conf_ctrl_0_lock);
 40static DEFINE_SPINLOCK(mod_conf_ctrl_1_lock);
 41static DEFINE_SPINLOCK(swd_clk_div_ctrl_sel_lock);
 42
 43/*
 44 * Omap1 specific clock functions
 45 */
 46
 47unsigned long omap1_uart_recalc(struct omap1_clk *clk, unsigned long p_rate)
 48{
 49	unsigned int val = __raw_readl(clk->enable_reg);
 50	return val & 1 << clk->enable_bit ? 48000000 : 12000000;
 51}
 52
 53unsigned long omap1_sossi_recalc(struct omap1_clk *clk, unsigned long p_rate)
 54{
 55	u32 div = omap_readl(MOD_CONF_CTRL_1);
 56
 57	div = (div >> 17) & 0x7;
 58	div++;
 59
 60	return p_rate / div;
 61}
 62
 63static void omap1_clk_allow_idle(struct omap1_clk *clk)
 64{
 65	struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
 66
 67	if (!(clk->flags & CLOCK_IDLE_CONTROL))
 68		return;
 69
 70	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
 71		arm_idlect1_mask |= 1 << iclk->idlect_shift;
 72}
 73
 74static void omap1_clk_deny_idle(struct omap1_clk *clk)
 75{
 76	struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
 77
 78	if (!(clk->flags & CLOCK_IDLE_CONTROL))
 79		return;
 80
 81	if (iclk->no_idle_count++ == 0)
 82		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
 83}
 84
 85static __u16 verify_ckctl_value(__u16 newval)
 86{
 87	/* This function checks for following limitations set
 88	 * by the hardware (all conditions must be true):
 89	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
 90	 * ARM_CK >= TC_CK
 91	 * DSP_CK >= TC_CK
 92	 * DSPMMU_CK >= TC_CK
 93	 *
 94	 * In addition following rules are enforced:
 95	 * LCD_CK <= TC_CK
 96	 * ARMPER_CK <= TC_CK
 97	 *
 98	 * However, maximum frequencies are not checked for!
 99	 */
100	__u8 per_exp;
101	__u8 lcd_exp;
102	__u8 arm_exp;
103	__u8 dsp_exp;
104	__u8 tc_exp;
105	__u8 dspmmu_exp;
106
107	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
108	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
109	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
110	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
111	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
112	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;
113
114	if (dspmmu_exp < dsp_exp)
115		dspmmu_exp = dsp_exp;
116	if (dspmmu_exp > dsp_exp+1)
117		dspmmu_exp = dsp_exp+1;
118	if (tc_exp < arm_exp)
119		tc_exp = arm_exp;
120	if (tc_exp < dspmmu_exp)
121		tc_exp = dspmmu_exp;
122	if (tc_exp > lcd_exp)
123		lcd_exp = tc_exp;
124	if (tc_exp > per_exp)
125		per_exp = tc_exp;
126
127	newval &= 0xf000;
128	newval |= per_exp << CKCTL_PERDIV_OFFSET;
129	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
130	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
131	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
132	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
133	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;
134
135	return newval;
136}
137
138static int calc_dsor_exp(unsigned long rate, unsigned long realrate)
139{
140	/* Note: If target frequency is too low, this function will return 4,
141	 * which is invalid value. Caller must check for this value and act
142	 * accordingly.
143	 *
144	 * Note: This function does not check for following limitations set
145	 * by the hardware (all conditions must be true):
146	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
147	 * ARM_CK >= TC_CK
148	 * DSP_CK >= TC_CK
149	 * DSPMMU_CK >= TC_CK
150	 */
151	unsigned  dsor_exp;
152
153	if (unlikely(realrate == 0))
154		return -EIO;
155
156	for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
157		if (realrate <= rate)
158			break;
159
160		realrate /= 2;
161	}
162
163	return dsor_exp;
164}
165
166unsigned long omap1_ckctl_recalc(struct omap1_clk *clk, unsigned long p_rate)
167{
168	/* Calculate divisor encoded as 2-bit exponent */
169	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
170
171	/* update locally maintained rate, required by arm_ck for omap1_show_rates() */
172	clk->rate = p_rate / dsor;
173	return clk->rate;
174}
175
176static int omap1_clk_is_enabled(struct clk_hw *hw)
177{
178	struct omap1_clk *clk = to_omap1_clk(hw);
179	bool api_ck_was_enabled = true;
180	__u32 regval32;
181	int ret;
182
183	if (!clk->ops)	/* no gate -- always enabled */
184		return 1;
185
186	if (clk->ops == &clkops_dspck) {
187		api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
188		if (!api_ck_was_enabled)
189			if (api_ck_p->ops->enable(api_ck_p) < 0)
190				return 0;
191	}
192
193	if (clk->flags & ENABLE_REG_32BIT)
194		regval32 = __raw_readl(clk->enable_reg);
195	else
196		regval32 = __raw_readw(clk->enable_reg);
197
198	ret = regval32 & (1 << clk->enable_bit);
199
200	if (!api_ck_was_enabled)
201		api_ck_p->ops->disable(api_ck_p);
202
203	return ret;
204}
205
206
207unsigned long omap1_ckctl_recalc_dsp_domain(struct omap1_clk *clk, unsigned long p_rate)
208{
209	bool api_ck_was_enabled;
210	int dsor;
211
212	/* Calculate divisor encoded as 2-bit exponent
213	 *
214	 * The clock control bits are in DSP domain,
215	 * so api_ck is needed for access.
216	 * Note that DSP_CKCTL virt addr = phys addr, so
217	 * we must use __raw_readw() instead of omap_readw().
218	 */
219	api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
220	if (!api_ck_was_enabled)
221		api_ck_p->ops->enable(api_ck_p);
222	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
223	if (!api_ck_was_enabled)
224		api_ck_p->ops->disable(api_ck_p);
225
226	return p_rate / dsor;
227}
228
229/* MPU virtual clock functions */
230int omap1_select_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
231{
232	/* Find the highest supported frequency <= rate and switch to it */
233	struct mpu_rate * ptr;
234	unsigned long ref_rate;
235
236	ref_rate = ck_ref_p->rate;
237
238	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
239		if (!(ptr->flags & cpu_mask))
240			continue;
241
242		if (ptr->xtal != ref_rate)
243			continue;
244
245		/* Can check only after xtal frequency check */
246		if (ptr->rate <= rate)
247			break;
248	}
249
250	if (!ptr->rate)
251		return -EINVAL;
252
253	/*
254	 * In most cases we should not need to reprogram DPLL.
255	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
256	 */
257	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
258
259	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
260	ck_dpll1_p->rate = ptr->pll_rate;
261
262	return 0;
263}
264
265int omap1_clk_set_rate_dsp_domain(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
266{
267	int dsor_exp;
268	u16 regval;
269
270	dsor_exp = calc_dsor_exp(rate, p_rate);
271	if (dsor_exp > 3)
272		dsor_exp = -EINVAL;
273	if (dsor_exp < 0)
274		return dsor_exp;
275
276	regval = __raw_readw(DSP_CKCTL);
277	regval &= ~(3 << clk->rate_offset);
278	regval |= dsor_exp << clk->rate_offset;
279	__raw_writew(regval, DSP_CKCTL);
280	clk->rate = p_rate / (1 << dsor_exp);
281
282	return 0;
283}
284
285long omap1_clk_round_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate,
286				    unsigned long *p_rate)
287{
288	int dsor_exp = calc_dsor_exp(rate, *p_rate);
289
290	if (dsor_exp < 0)
291		return dsor_exp;
292	if (dsor_exp > 3)
293		dsor_exp = 3;
294	return *p_rate / (1 << dsor_exp);
295}
296
297int omap1_clk_set_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
298{
299	unsigned long flags;
300	int dsor_exp;
301	u16 regval;
302
303	dsor_exp = calc_dsor_exp(rate, p_rate);
304	if (dsor_exp > 3)
305		dsor_exp = -EINVAL;
306	if (dsor_exp < 0)
307		return dsor_exp;
308
309	/* protect ARM_CKCTL register from concurrent access via clk_enable/disable() */
310	spin_lock_irqsave(&arm_ckctl_lock, flags);
311
312	regval = omap_readw(ARM_CKCTL);
313	regval &= ~(3 << clk->rate_offset);
314	regval |= dsor_exp << clk->rate_offset;
315	regval = verify_ckctl_value(regval);
316	omap_writew(regval, ARM_CKCTL);
317	clk->rate = p_rate / (1 << dsor_exp);
318
319	spin_unlock_irqrestore(&arm_ckctl_lock, flags);
320
321	return 0;
322}
323
324long omap1_round_to_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
325{
326	/* Find the highest supported frequency <= rate */
327	struct mpu_rate * ptr;
328	long highest_rate;
329	unsigned long ref_rate;
330
331	ref_rate = ck_ref_p->rate;
332
333	highest_rate = -EINVAL;
334
335	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
336		if (!(ptr->flags & cpu_mask))
337			continue;
338
339		if (ptr->xtal != ref_rate)
340			continue;
341
342		highest_rate = ptr->rate;
343
344		/* Can check only after xtal frequency check */
345		if (ptr->rate <= rate)
346			break;
347	}
348
349	return highest_rate;
350}
351
352static unsigned calc_ext_dsor(unsigned long rate)
353{
354	unsigned dsor;
355
356	/* MCLK and BCLK divisor selection is not linear:
357	 * freq = 96MHz / dsor
358	 *
359	 * RATIO_SEL range: dsor <-> RATIO_SEL
360	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
361	 * 6..48:  (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
362	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
363	 * can not be used.
364	 */
365	for (dsor = 2; dsor < 96; ++dsor) {
366		if ((dsor & 1) && dsor > 8)
367			continue;
368		if (rate >= 96000000 / dsor)
369			break;
370	}
371	return dsor;
372}
373
374/* XXX Only needed on 1510 */
375long omap1_round_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
376{
377	return rate > 24000000 ? 48000000 : 12000000;
378}
379
380int omap1_set_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
381{
382	unsigned long flags;
383	unsigned int val;
384
385	if (rate == 12000000)
386		val = 0;
387	else if (rate == 48000000)
388		val = 1 << clk->enable_bit;
389	else
390		return -EINVAL;
391
392	/* protect MOD_CONF_CTRL_0 register from concurrent access via clk_enable/disable() */
393	spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
394
395	val |= __raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit);
396	__raw_writel(val, clk->enable_reg);
397
398	spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
399
400	clk->rate = rate;
401
402	return 0;
403}
404
405/* External clock (MCLK & BCLK) functions */
406int omap1_set_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
407{
408	unsigned long flags;
409	unsigned dsor;
410	__u16 ratio_bits;
411
412	dsor = calc_ext_dsor(rate);
413	clk->rate = 96000000 / dsor;
414	if (dsor > 8)
415		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
416	else
417		ratio_bits = (dsor - 2) << 2;
418
419	/* protect SWD_CLK_DIV_CTRL_SEL register from concurrent access via clk_enable/disable() */
420	spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
421
422	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
423	__raw_writew(ratio_bits, clk->enable_reg);
424
425	spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
426
427	return 0;
428}
429
430static int calc_div_sossi(unsigned long rate, unsigned long p_rate)
431{
432	int div;
433
434	/* Round towards slower frequency */
435	div = (p_rate + rate - 1) / rate;
436
437	return --div;
438}
439
440long omap1_round_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
441{
442	int div;
443
444	div = calc_div_sossi(rate, *p_rate);
445	if (div < 0)
446		div = 0;
447	else if (div > 7)
448		div = 7;
449
450	return *p_rate / (div + 1);
451}
452
453int omap1_set_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
454{
455	unsigned long flags;
456	u32 l;
457	int div;
458
459	div = calc_div_sossi(rate, p_rate);
460	if (div < 0 || div > 7)
461		return -EINVAL;
462
463	/* protect MOD_CONF_CTRL_1 register from concurrent access via clk_enable/disable() */
464	spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
465
466	l = omap_readl(MOD_CONF_CTRL_1);
467	l &= ~(7 << 17);
468	l |= div << 17;
469	omap_writel(l, MOD_CONF_CTRL_1);
470
471	clk->rate = p_rate / (div + 1);
472
473	spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
474
475	return 0;
476}
477
478long omap1_round_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
479{
480	return 96000000 / calc_ext_dsor(rate);
481}
482
483int omap1_init_ext_clk(struct omap1_clk *clk)
484{
485	unsigned dsor;
486	__u16 ratio_bits;
487
488	/* Determine current rate and ensure clock is based on 96MHz APLL */
489	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
490	__raw_writew(ratio_bits, clk->enable_reg);
491
492	ratio_bits = (ratio_bits & 0xfc) >> 2;
493	if (ratio_bits > 6)
494		dsor = (ratio_bits - 6) * 2 + 8;
495	else
496		dsor = ratio_bits + 2;
497
498	clk->rate = 96000000 / dsor;
499
500	return 0;
501}
502
503static int omap1_clk_enable(struct clk_hw *hw)
504{
505	struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
506	int ret = 0;
507
508	if (parent && clk->flags & CLOCK_NO_IDLE_PARENT)
509		omap1_clk_deny_idle(parent);
510
511	if (clk->ops && !(WARN_ON(!clk->ops->enable)))
512		ret = clk->ops->enable(clk);
513
514	return ret;
515}
516
517static void omap1_clk_disable(struct clk_hw *hw)
518{
519	struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
520
521	if (clk->ops && !(WARN_ON(!clk->ops->disable)))
522		clk->ops->disable(clk);
523
524	if (likely(parent) && clk->flags & CLOCK_NO_IDLE_PARENT)
525		omap1_clk_allow_idle(parent);
526}
527
528static int omap1_clk_enable_generic(struct omap1_clk *clk)
529{
530	unsigned long flags;
531	__u16 regval16;
532	__u32 regval32;
533
534	if (unlikely(clk->enable_reg == NULL)) {
535		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
536		       clk_hw_get_name(&clk->hw));
537		return -EINVAL;
538	}
539
540	/* protect clk->enable_reg from concurrent access via clk_set_rate() */
541	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
542		spin_lock_irqsave(&arm_ckctl_lock, flags);
543	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
544		spin_lock_irqsave(&arm_idlect2_lock, flags);
545	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
546		spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
547	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
548		spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
549	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
550		spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
551
552	if (clk->flags & ENABLE_REG_32BIT) {
553		regval32 = __raw_readl(clk->enable_reg);
554		regval32 |= (1 << clk->enable_bit);
555		__raw_writel(regval32, clk->enable_reg);
556	} else {
557		regval16 = __raw_readw(clk->enable_reg);
558		regval16 |= (1 << clk->enable_bit);
559		__raw_writew(regval16, clk->enable_reg);
560	}
561
562	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
563		spin_unlock_irqrestore(&arm_ckctl_lock, flags);
564	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
565		spin_unlock_irqrestore(&arm_idlect2_lock, flags);
566	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
567		spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
568	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
569		spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
570	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
571		spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
572
573	return 0;
574}
575
576static void omap1_clk_disable_generic(struct omap1_clk *clk)
577{
578	unsigned long flags;
579	__u16 regval16;
580	__u32 regval32;
581
582	if (clk->enable_reg == NULL)
583		return;
584
585	/* protect clk->enable_reg from concurrent access via clk_set_rate() */
586	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
587		spin_lock_irqsave(&arm_ckctl_lock, flags);
588	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
589		spin_lock_irqsave(&arm_idlect2_lock, flags);
590	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
591		spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
592	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
593		spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
594	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
595		spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
596
597	if (clk->flags & ENABLE_REG_32BIT) {
598		regval32 = __raw_readl(clk->enable_reg);
599		regval32 &= ~(1 << clk->enable_bit);
600		__raw_writel(regval32, clk->enable_reg);
601	} else {
602		regval16 = __raw_readw(clk->enable_reg);
603		regval16 &= ~(1 << clk->enable_bit);
604		__raw_writew(regval16, clk->enable_reg);
605	}
606
607	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
608		spin_unlock_irqrestore(&arm_ckctl_lock, flags);
609	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
610		spin_unlock_irqrestore(&arm_idlect2_lock, flags);
611	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
612		spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
613	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
614		spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
615	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
616		spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
617}
618
619const struct clkops clkops_generic = {
620	.enable		= omap1_clk_enable_generic,
621	.disable	= omap1_clk_disable_generic,
622};
623
624static int omap1_clk_enable_dsp_domain(struct omap1_clk *clk)
625{
626	bool api_ck_was_enabled;
627	int retval = 0;
628
629	api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
630	if (!api_ck_was_enabled)
631		retval = api_ck_p->ops->enable(api_ck_p);
632
633	if (!retval) {
634		retval = omap1_clk_enable_generic(clk);
635
636		if (!api_ck_was_enabled)
637			api_ck_p->ops->disable(api_ck_p);
638	}
639
640	return retval;
641}
642
643static void omap1_clk_disable_dsp_domain(struct omap1_clk *clk)
644{
645	bool api_ck_was_enabled;
646
647	api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
648	if (!api_ck_was_enabled)
649		if (api_ck_p->ops->enable(api_ck_p) < 0)
650			return;
651
652	omap1_clk_disable_generic(clk);
653
654	if (!api_ck_was_enabled)
655		api_ck_p->ops->disable(api_ck_p);
656}
657
658const struct clkops clkops_dspck = {
659	.enable		= omap1_clk_enable_dsp_domain,
660	.disable	= omap1_clk_disable_dsp_domain,
661};
662
663/* XXX SYSC register handling does not belong in the clock framework */
664static int omap1_clk_enable_uart_functional_16xx(struct omap1_clk *clk)
665{
666	int ret;
667	struct uart_clk *uclk;
668
669	ret = omap1_clk_enable_generic(clk);
670	if (ret == 0) {
671		/* Set smart idle acknowledgement mode */
672		uclk = (struct uart_clk *)clk;
673		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
674			    uclk->sysc_addr);
675	}
676
677	return ret;
678}
679
680/* XXX SYSC register handling does not belong in the clock framework */
681static void omap1_clk_disable_uart_functional_16xx(struct omap1_clk *clk)
682{
683	struct uart_clk *uclk;
684
685	/* Set force idle acknowledgement mode */
686	uclk = (struct uart_clk *)clk;
687	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
688
689	omap1_clk_disable_generic(clk);
690}
691
692/* XXX SYSC register handling does not belong in the clock framework */
693const struct clkops clkops_uart_16xx = {
694	.enable		= omap1_clk_enable_uart_functional_16xx,
695	.disable	= omap1_clk_disable_uart_functional_16xx,
696};
697
698static unsigned long omap1_clk_recalc_rate(struct clk_hw *hw, unsigned long p_rate)
699{
700	struct omap1_clk *clk = to_omap1_clk(hw);
701
702	if (clk->recalc)
703		return clk->recalc(clk, p_rate);
704
705	return clk->rate;
706}
707
708static long omap1_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *p_rate)
709{
710	struct omap1_clk *clk = to_omap1_clk(hw);
711
712	if (clk->round_rate != NULL)
713		return clk->round_rate(clk, rate, p_rate);
714
715	return omap1_clk_recalc_rate(hw, *p_rate);
716}
717
718static int omap1_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long p_rate)
719{
720	struct omap1_clk *clk = to_omap1_clk(hw);
721	int  ret = -EINVAL;
722
723	if (clk->set_rate)
724		ret = clk->set_rate(clk, rate, p_rate);
725	return ret;
726}
727
728/*
729 * Omap1 clock reset and init functions
730 */
731
732static int omap1_clk_init_op(struct clk_hw *hw)
733{
734	struct omap1_clk *clk = to_omap1_clk(hw);
735
736	if (clk->init)
737		return clk->init(clk);
738
739	return 0;
740}
741
742#ifdef CONFIG_OMAP_RESET_CLOCKS
743
744static void omap1_clk_disable_unused(struct clk_hw *hw)
745{
746	struct omap1_clk *clk = to_omap1_clk(hw);
747	const char *name = clk_hw_get_name(hw);
748
749	/* Clocks in the DSP domain need api_ck. Just assume bootloader
750	 * has not enabled any DSP clocks */
751	if (clk->enable_reg == DSP_IDLECT2) {
752		pr_info("Skipping reset check for DSP domain clock \"%s\"\n", name);
753		return;
754	}
755
756	pr_info("Disabling unused clock \"%s\"... ", name);
757	omap1_clk_disable(hw);
758	printk(" done\n");
759}
760
761#endif
762
763const struct clk_ops omap1_clk_gate_ops = {
764	.enable		= omap1_clk_enable,
765	.disable	= omap1_clk_disable,
766	.is_enabled	= omap1_clk_is_enabled,
767#ifdef CONFIG_OMAP_RESET_CLOCKS
768	.disable_unused	= omap1_clk_disable_unused,
769#endif
770};
771
772const struct clk_ops omap1_clk_rate_ops = {
773	.recalc_rate	= omap1_clk_recalc_rate,
774	.round_rate	= omap1_clk_round_rate,
775	.set_rate	= omap1_clk_set_rate,
776	.init		= omap1_clk_init_op,
777};
778
779const struct clk_ops omap1_clk_full_ops = {
780	.enable		= omap1_clk_enable,
781	.disable	= omap1_clk_disable,
782	.is_enabled	= omap1_clk_is_enabled,
783#ifdef CONFIG_OMAP_RESET_CLOCKS
784	.disable_unused	= omap1_clk_disable_unused,
785#endif
786	.recalc_rate	= omap1_clk_recalc_rate,
787	.round_rate	= omap1_clk_round_rate,
788	.set_rate	= omap1_clk_set_rate,
789	.init		= omap1_clk_init_op,
790};
791
792/*
793 * OMAP specific clock functions shared between omap1 and omap2
794 */
795
796/* Used for clocks that always have same value as the parent clock */
797unsigned long followparent_recalc(struct omap1_clk *clk, unsigned long p_rate)
798{
799	return p_rate;
800}
801
802/*
803 * Used for clocks that have the same value as the parent clock,
804 * divided by some factor
805 */
806unsigned long omap_fixed_divisor_recalc(struct omap1_clk *clk, unsigned long p_rate)
807{
808	WARN_ON(!clk->fixed_div);
809
810	return p_rate / clk->fixed_div;
811}
812
813/* Propagate rate to children */
814void propagate_rate(struct omap1_clk *tclk)
815{
816	struct clk *clkp;
817
818	/* depend on CCF ability to recalculate new rates across whole clock subtree */
819	if (WARN_ON(!(clk_hw_get_flags(&tclk->hw) & CLK_GET_RATE_NOCACHE)))
820		return;
821
822	clkp = clk_get_sys(NULL, clk_hw_get_name(&tclk->hw));
823	if (WARN_ON(!clkp))
824		return;
825
826	clk_get_rate(clkp);
827	clk_put(clkp);
828}
829
830const struct clk_ops omap1_clk_null_ops = {
831};
832
833/*
834 * Dummy clock
835 *
836 * Used for clock aliases that are needed on some OMAPs, but not others
837 */
838struct omap1_clk dummy_ck __refdata = {
839	.hw.init	= CLK_HW_INIT_NO_PARENT("dummy", &omap1_clk_null_ops, 0),
840};
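
The v6.8 version above hooks the same register handling into the common clock framework through the clk_ops tables (omap1_clk_gate_ops, omap1_clk_rate_ops, omap1_clk_full_ops). Below is a sketch of how one gated clock could be described and registered: the struct omap1_clk field names are taken from their use in the listing, while the clock name "example_ck", the enable register and the bit number are made-up placeholders, not values from a real OMAP1 clock tree.

/*
 * Hypothetical gated clock handed to the common clock framework.
 * Assumes the struct omap1_clk definition from clock.h (not shown above).
 */
static struct omap1_clk example_ck = {
	.hw.init	= CLK_HW_INIT_NO_PARENT("example_ck", &omap1_clk_gate_ops, 0),
	.ops		= &clkops_generic,			/* generic enable/disable helpers */
	.enable_reg	= OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),	/* placeholder register */
	.enable_bit	= 5,					/* placeholder bit */
};

static int __init example_ck_register(void)
{
	/* clk_hw_register() hands the clock to the CCF, which then calls
	 * back into omap1_clk_enable()/omap1_clk_disable() via the ops.
	 */
	return clk_hw_register(NULL, &example_ck.hw);
}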