/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "clk-kona.h"

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>

/*
 * "Policies" affect the frequencies of bus clocks provided by a
 * CCU.  (I believe these policies are named "Deep Sleep", "Economy",
 * "Normal", and "Turbo".)  A lower policy number implies lower power
 * consumption, and policy 2 is the default.
 */
#define CCU_POLICY_COUNT	4

#define CCU_ACCESS_PASSWORD      0xA5A500
#define CLK_GATE_DELAY_LOOP      2000

/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
	return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
	return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
	u32 mask = bitfield_mask(shift, width);

	return (reg_val & ~mask) | (val << shift);
}
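
/*
 * A worked example of these helpers (values are illustrative only):
 * for a field at shift 4 with width 8,
 *
 *	bitfield_mask(4, 8)			== 0x00000ff0
 *	bitfield_extract(0x1234, 4, 8)		== 0x23
 *	bitfield_replace(0x1234, 4, 8, 0xab)	== 0x1ab4
 *
 * Note that bitfield_replace() assumes "val" fits in "width" bits;
 * callers are responsible for guaranteeing that.
 */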

/* Divider and scaling helpers */

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}

/*
 * Build a scaled divider value as close as possible to the
 * given whole part (div_value) and fractional part (expressed
 * in billionths).
 */
u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
{
	u64 combined;

	BUG_ON(!div_value);
	BUG_ON(billionths >= BILLION);

	combined = (u64)div_value * BILLION + billionths;
	combined <<= div->u.s.frac_width;

	return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
}
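
/*
 * Worked example (hypothetical field size): with frac_width = 3,
 * requesting a divisor of 5.25 means div_value = 5 and
 * billionths = 250000000, so
 *
 *	combined = (5 * BILLION + 250000000) << 3 = 42 * BILLION
 *
 * and the scaled divisor returned is 42, i.e. 5.25 * (1 << 3).
 */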

/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	reg_div = ((u32)1 << div->u.s.width) - 1;

	return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32
divider(struct bcm_clk_div *div, u64 scaled_div)
{
	BUG_ON(scaled_div < scaled_div_min(div));
	BUG_ON(scaled_div > scaled_div_max(div));

	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}

/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64
scale_rate(struct bcm_clk_div *div, u32 rate)
{
	if (divider_is_fixed(div))
		return (u64)rate;

	return (u64)rate << div->u.s.frac_width;
}
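
/*
 * To see how these fit together (illustrative values, frac_width = 3):
 * the scaled divisor 42 from the example above is stored in the
 * register field as divider() == 42 - (1 << 3) == 34, and
 * scaled_div_value() maps 34 back to 42.  Dividing a 100 MHz rate by
 * that divisor is done as scale_rate() followed by the division:
 *
 *	(100000000 << 3) / 42 == 19047619 Hz
 *
 * which is 100 MHz / 5.25, as expected.
 */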

/* CCU access */

/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
	return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
	writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
	unsigned long flags;

	spin_lock_irqsave(&ccu->lock, flags);

	return flags;
}

static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
	spin_unlock_irqrestore(&ccu->lock, flags);
}

/*
 * Enable/disable write access to CCU protected registers.  The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
	if (ccu->write_enabled) {
		pr_err("%s: access already enabled for %s\n", __func__,
			ccu->name);
		return;
	}
	ccu->write_enabled = true;
	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
	if (!ccu->write_enabled) {
		pr_err("%s: access wasn't enabled for %s\n", __func__,
			ccu->name);
		return;
	}

	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
	ccu->write_enabled = false;
}
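
/*
 * Concretely, with CCU_ACCESS_PASSWORD 0xA5A500 the value written to
 * unlock the CCU is 0xA5A501 (the password plus the enable bit), and
 * writing 0xA5A500 locks it again.
 */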

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear).  Delay
 * a microsecond after each read of the register.  Returns true if
 * successful, or false if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
	unsigned int tries;
	u32 bit_mask = 1 << bit;

	for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
		u32 val;
		bool bit_val;

		val = __ccu_read(ccu, reg_offset);
		bit_val = (val & bit_mask) != 0;
		if (bit_val == want)
			return true;
		udelay(1);
	}
	pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
		ccu->name, reg_offset, bit, want ? "set" : "clear");

	return false;
}
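
/*
 * With CLK_GATE_DELAY_LOOP set to 2000 and a 1 microsecond delay per
 * iteration, __ccu_wait_bit() polls for roughly 2 milliseconds (plus
 * register-read overhead) before giving up.
 */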

/* Policy operations */

static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
{
	struct bcm_policy_ctl *control = &ccu->policy.control;
	u32 offset;
	u32 go_bit;
	u32 mask;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_ctl_exists(control))
		return true;

	offset = control->offset;
	go_bit = control->go_bit;

	/* Ensure we're not busy before we start */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * If it's a synchronous request, we'll wait for the voltage
	 * and frequency of the active load to stabilize before
	 * returning.  To do this we select the active load by
	 * setting the ATL bit.
	 *
	 * An asynchronous request instead ramps the voltage in the
	 * background, and when that process stabilizes, the target
	 * load is copied to the active load and the CCU frequency
	 * is switched.  We do this by selecting the target load
	 * (ATL bit clear) and setting the request auto-copy (AC bit
	 * set).
	 *
	 * Note, we do NOT read-modify-write this register.
	 */
	mask = (u32)1 << go_bit;
	if (sync)
		mask |= 1 << control->atl_bit;
	else
		mask |= 1 << control->ac_bit;
	__ccu_write(ccu, offset, mask);

	/* Wait for indication that operation is complete. */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never started\n",
			__func__, ccu->name);

	return ret;
}

static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
{
	struct bcm_lvm_en *enable = &ccu->policy.enable;
	u32 offset;
	u32 enable_bit;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_lvm_en_exists(enable))
		return true;

	/* Ensure we're not busy before we start */
	offset = enable->offset;
	enable_bit = enable->bit;
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/* Now set the bit to stop the engine (NO read-modify-write) */
	__ccu_write(ccu, offset, (u32)1 << enable_bit);

	/* Wait for indication that it has stopped. */
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never stopped\n",
			__func__, ccu->name);

	return ret;
}

/*
 * A CCU has four operating conditions ("policies"), and some clocks
 * can be disabled or enabled based on which policy is currently in
 * effect.  Such clocks have a bit in a "policy mask" register for
 * each policy indicating whether the clock is enabled for that
 * policy or not.  The bit position for a clock is the same for all
 * four registers, and the 32-bit registers are at consecutive
 * addresses.
 */
static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
{
	u32 offset;
	u32 mask;
	int i;
	bool ret;

	if (!policy_exists(policy))
		return true;

	/*
	 * We need to stop the CCU policy engine to allow update
	 * of our policy bits.
	 */
	if (!__ccu_policy_engine_stop(ccu)) {
		pr_err("%s: unable to stop CCU %s policy engine\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * For now, if a clock defines its policy bit we just mark
	 * it "enabled" for all four policies.
	 */
	offset = policy->offset;
	mask = (u32)1 << policy->bit;
	for (i = 0; i < CCU_POLICY_COUNT; i++) {
		u32 reg_val;

		reg_val = __ccu_read(ccu, offset);
		reg_val |= mask;
		__ccu_write(ccu, offset, reg_val);
		offset += sizeof(u32);
	}

	/* We're done updating; fire up the policy engine again. */
	ret = __ccu_policy_engine_start(ccu, true);
	if (!ret)
		pr_err("%s: unable to restart CCU %s policy engine\n",
			__func__, ccu->name);

	return ret;
}
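
/*
 * As an illustration of the register layout described above (offset
 * and bit values here are hypothetical): if a clock's policy mask has
 * offset 0x0010 and bit 5, the loop in policy_init() sets bit 5 in
 * each of the four consecutive registers at offsets 0x0010, 0x0014,
 * 0x0018, and 0x001c, one per policy.
 */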

/* Gate operations */

/* Determine whether a clock is gated.  CCU lock must be held.  */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 bit_mask;
	u32 reg_val;

	/* If there is no gate we can assume it's enabled. */
	if (!gate_exists(gate))
		return true;

	bit_mask = 1 << gate->status_bit;
	reg_val = __ccu_read(ccu, gate->offset);

	return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	unsigned long flags;
	bool ret;

	/* Avoid taking the lock if we can */
	if (!gate_exists(gate))
		return true;

	flags = ccu_lock(ccu);
	ret = __is_clk_gate_enabled(ccu, gate);
	ccu_unlock(ccu, flags);

	return ret;
}

/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 reg_val;
	u32 mask;
	bool enabled = false;

	BUG_ON(!gate_exists(gate));
	if (!gate_is_sw_controllable(gate))
		return true;		/* Nothing we can change */

	reg_val = __ccu_read(ccu, gate->offset);

	/* For a hardware/software gate, set which is in control */
	if (gate_is_hw_controllable(gate)) {
		mask = (u32)1 << gate->hw_sw_sel_bit;
		if (gate_is_sw_managed(gate))
			reg_val |= mask;
		else
			reg_val &= ~mask;
	}

	/*
	 * If software is in control, enable or disable the gate.
	 * If hardware is, clear the enabled bit for good measure.
	 * If a software controlled gate can't be disabled, we're
	 * required to write a 0 into the enable bit (but the gate
	 * will be enabled).
	 */
	mask = (u32)1 << gate->en_bit;
	if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
			!gate_is_no_disable(gate))
		reg_val |= mask;
	else
		reg_val &= ~mask;

	__ccu_write(ccu, gate->offset, reg_val);

	/* For a hardware controlled gate, we're done */
	if (!gate_is_sw_managed(gate))
		return true;

	/* Otherwise wait for the gate to be in desired state */
	return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}
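
/*
 * To summarize the bits __gate_commit() touches: hw_sw_sel_bit is set
 * when software is to be in control; en_bit is set only when software
 * is in control, the gate should be enabled, and the gate is allowed
 * to be disabled (a "no disable" gate always gets a 0 written, yet
 * stays enabled); and status_bit reports the state the hardware
 * actually reached, which is what we poll for at the end.
 */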

/*
 * Initialize a gate.  Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	if (!gate_exists(gate))
		return true;
	return __gate_commit(ccu, gate);
}

/*
 * Set a gate to enabled or disabled state.  Does nothing if the
 * gate is not currently under software control, or if it is already
 * in the requested state.  Returns true if successful, false
 * otherwise.  CCU lock must be held.
 */
static bool
__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
{
	bool ret;

	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return true;	/* Nothing to do */

	if (!enable && gate_is_no_disable(gate)) {
		pr_warn("%s: invalid gate disable request (ignoring)\n",
			__func__);
		return true;
	}

	if (enable == gate_is_enabled(gate))
		return true;	/* No change */

	gate_flip_enabled(gate);
	ret = __gate_commit(ccu, gate);
	if (!ret)
		gate_flip_enabled(gate);	/* Revert the change */

	return ret;
}

/* Enable or disable a gate.  Returns 0 if successful, -EIO otherwise */
static int clk_gate(struct ccu_data *ccu, const char *name,
			struct bcm_clk_gate *gate, bool enable)
{
	unsigned long flags;
	bool success;

	/*
	 * Avoid taking the lock if we can.  We quietly ignore
	 * requests to change state that don't make sense.
	 */
	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return 0;
	if (!enable && gate_is_no_disable(gate))
		return 0;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	success = __clk_gate(ccu, gate, enable);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (success)
		return 0;

	pr_err("%s: failed to %s gate for %s\n", __func__,
		enable ? "enable" : "disable", name);

	return -EIO;
}

/* Hysteresis operations */

/*
 * If a clock gate requires a turn-off delay it will have
 * "hysteresis" register bits defined.  The first, if set, enables
 * the delay; and if enabled, the second bit determines whether the
 * delay is "low" or "high" (1 means high).  For now, if it's
 * defined for a clock, we set it.
 */
static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst)
{
	u32 offset;
	u32 reg_val;
	u32 mask;

	if (!hyst_exists(hyst))
		return true;

	offset = hyst->offset;
	mask = (u32)1 << hyst->en_bit;
	mask |= (u32)1 << hyst->val_bit;

	reg_val = __ccu_read(ccu, offset);
	reg_val |= mask;
	__ccu_write(ccu, offset, reg_val);

	return true;
}

/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
	/* Trigger the clock and wait for it to finish */
	__ccu_write(ccu, trig->offset, 1 << trig->bit);

	return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}

/* Divider operations */

/* Read a divider value and return the scaled divisor it represents. */
static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
{
	unsigned long flags;
	u32 reg_val;
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, div->u.s.offset);
	ccu_unlock(ccu, flags);

	/* Extract the full divider field from the register value */
	reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);

	/* Return the scaled divisor value it represents */
	return scaled_div_value(div, reg_div);
}
/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success.  Returns -ENXIO if gating failed, and -EIO
 * if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	bool enabled;
	u32 reg_div;
	u32 reg_val;
	int ret = 0;

	BUG_ON(divider_is_fixed(div));

	/*
	 * If we're just initializing the divider, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
		reg_val = __ccu_read(ccu, div->u.s.offset);
		reg_div = bitfield_extract(reg_val, div->u.s.shift,
						div->u.s.width);
		div->u.s.scaled_div = scaled_div_value(div, reg_div);

		return 0;
	}

	/* Convert the scaled divisor to the value we need to record */
	reg_div = divider(div, div->u.s.scaled_div);

	/* Clock needs to be enabled before changing the rate */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true)) {
		ret = -ENXIO;
		goto out;
	}

	/* Replace the divider value and record the result */
	reg_val = __ccu_read(ccu, div->u.s.offset);
	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
					reg_div);
	__ccu_write(ccu, div->u.s.offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */
out:
	return ret;
}

/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	if (!divider_exists(div) || divider_is_fixed(div))
		return true;
	return !__div_commit(ccu, gate, div, trig);
}

static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig,
			u64 scaled_div)
{
	unsigned long flags;
	u64 previous;
	int ret;

	BUG_ON(divider_is_fixed(div));

	previous = div->u.s.scaled_div;
	if (previous == scaled_div)
		return 0;	/* No change */

	div->u.s.scaled_div = scaled_div;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __div_commit(ccu, gate, div, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		div->u.s.scaled_div = previous;		/* Revert the change */

	return ret;
}

/* Common clock rate helpers */

/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider.  The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
			struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
			unsigned long parent_rate)
{
	u64 scaled_parent_rate;
	u64 scaled_div;
	u64 result;

	if (!divider_exists(div))
		return parent_rate;

	if (parent_rate > (unsigned long)LONG_MAX)
		return 0;	/* actually this would be a caller bug */

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 */
	if (pre_div && divider_exists(pre_div)) {
		u64 scaled_rate;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
							scaled_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Get the scaled divisor value, and divide the scaled
	 * parent rate by that to determine this clock's resulting
	 * rate.
	 */
	scaled_div = divider_read_scaled(ccu, div);
	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div);

	return (unsigned long)result;
}
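
/*
 * A worked example (hypothetical divider configuration): with a
 * pre-divider and divider both using frac_width = 3, a scaled
 * pre-divider value of 16 (i.e. divide by 2) and a scaled divider
 * value of 24 (i.e. divide by 3), a 96 MHz parent yields
 *
 *	scaled_rate        = 96000000 << (3 + 3) = 6144000000
 *	scaled_parent_rate = 6144000000 / 16     = 384000000
 *	result             = 384000000 / 24      = 16000000
 *
 * which is 96 MHz / 2 / 3 = 16 MHz.
 */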

/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers.  The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent.  It's also OK for the divider to
 * be nonexistent, and in that case the pre-divider is also ignored.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 */
static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
				struct bcm_clk_div *pre_div,
				unsigned long rate, unsigned long parent_rate,
				u64 *scaled_div)
{
	u64 scaled_parent_rate;
	u64 min_scaled_div;
	u64 max_scaled_div;
	u64 best_scaled_div;
	u64 result;

	BUG_ON(!divider_exists(div));
	BUG_ON(!rate);
	BUG_ON(parent_rate > (u64)LONG_MAX);

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 *
	 * For simplicity we treat the pre-divider as fixed (for now).
	 */
	if (divider_exists(pre_div)) {
		u64 scaled_rate;
		u64 scaled_pre_div;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_pre_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
							scaled_pre_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Compute the best possible divider and ensure it is in
	 * range.  A fixed divider can't be changed, so just report
	 * the best we can do.
	 */
	if (!divider_is_fixed(div)) {
		best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
							rate);
		min_scaled_div = scaled_div_min(div);
		max_scaled_div = scaled_div_max(div);
		if (best_scaled_div > max_scaled_div)
			best_scaled_div = max_scaled_div;
		else if (best_scaled_div < min_scaled_div)
			best_scaled_div = min_scaled_div;
	} else {
		best_scaled_div = divider_read_scaled(ccu, div);
	}

	/* OK, figure out the resulting rate */
	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);

	if (scaled_div)
		*scaled_div = best_scaled_div;

	return (long)result;
}
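
/*
 * Example of the divisor search (no pre-divider, frac_width = 3,
 * values illustrative only): asking for 19 MHz from a 100 MHz parent
 * gives
 *
 *	scaled_parent_rate = 100000000 << 3 = 800000000
 *	best_scaled_div    = round(800000000 / 19000000) = 42
 *	result             = 800000000 / 42 = 19047619
 *
 * so the closest achievable rate is about 19.05 MHz, using an
 * effective divisor of 42 / 8 = 5.25.
 */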

/* Common clock parent helpers */

/*
 * For a given parent selector (register field) value, find the
 * index into a selector's parent_sel array that contains it.
 * Returns the index, or BAD_CLK_INDEX if it's not found.
 */
static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
{
	u8 i;

	BUG_ON(sel->parent_count > (u32)U8_MAX);
	for (i = 0; i < sel->parent_count; i++)
		if (sel->parent_sel[i] == parent_sel)
			return i;
	return BAD_CLK_INDEX;
}
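
/*
 * For example (hypothetical selector data): if a selector's
 * parent_sel array is { 1, 3, 4 }, a register field value of 3 maps
 * to parent array index 1, while a field value of 2 is not listed
 * and yields BAD_CLK_INDEX.
 */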

/*
 * Fetch the current value of the selector, and translate that into
 * its corresponding index in the parent array we registered with
 * the clock framework.
 *
 * Returns parent array index that corresponds with the value found,
 * or BAD_CLK_INDEX if the found value is out of range.
 */
static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
{
	unsigned long flags;
	u32 reg_val;
	u32 parent_sel;
	u8 index;

	/* If there's no selector, there's only one parent */
	if (!selector_exists(sel))
		return 0;

	/* Get the value in the selector register */
	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, sel->offset);
	ccu_unlock(ccu, flags);

	parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);

	/* Look up that selector's parent array index and return it */
	index = parent_index(sel, parent_sel);
	if (index == BAD_CLK_INDEX)
		pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
			__func__, parent_sel, ccu->name, sel->offset);

	return index;
}

/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	u32 parent_sel;
	u32 reg_val;
	bool enabled;
	int ret = 0;

	BUG_ON(!selector_exists(sel));

	/*
	 * If we're just initializing the selector, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (sel->clk_index == BAD_CLK_INDEX) {
		u8 index;

		reg_val = __ccu_read(ccu, sel->offset);
		parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
		index = parent_index(sel, parent_sel);
		if (index == BAD_CLK_INDEX)
			return -EINVAL;
		sel->clk_index = index;

		return 0;
	}

	BUG_ON((u32)sel->clk_index >= sel->parent_count);
	parent_sel = sel->parent_sel[sel->clk_index];

	/* Clock needs to be enabled before changing the parent */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true))
		return -ENXIO;

	/* Replace the selector value and record the result */
	reg_val = __ccu_read(ccu, sel->offset);
	reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
	__ccu_write(ccu, sel->offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */

	return ret;
}

/*
 * Initialize a selector by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	if (!selector_exists(sel))
		return true;
	return !__sel_commit(ccu, gate, sel, trig);
}

/*
 * Write a new value into a selector register to switch to a
 * different parent clock.  Returns 0 on success, or an error code
 * (from __sel_commit()) otherwise.
 */
static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
			u8 index)
{
	unsigned long flags;
	u8 previous;
	int ret;

	previous = sel->clk_index;
	if (previous == index)
		return 0;	/* No change */

	sel->clk_index = index;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __sel_commit(ccu, gate, sel, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		sel->clk_index = previous;	/* Revert the change */

	return ret;
}

/* Clock operations */

static int kona_peri_clk_enable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	(void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}

static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;

	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
				parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_div *div = &bcm_clk->u.peri->div;

	if (!divider_exists(div))
		return clk_hw_get_rate(hw);

	/* Quietly avoid a zero rate */
	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
				rate ? rate : 1, *parent_rate, NULL);
}

static int kona_peri_clk_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct clk_hw *current_parent;
	unsigned long parent_rate;
	unsigned long best_delta;
	unsigned long best_rate;
	u32 parent_count;
	long rate;
	u32 which;

	/*
	 * If there is no other parent to choose, use the current one.
	 * Note:  We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
	 */
	WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
	parent_count = (u32)bcm_clk->init_data.num_parents;
	if (parent_count < 2) {
		rate = kona_peri_clk_round_rate(hw, req->rate,
						&req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
		return 0;
	}

	/* Unless we can do better, stick with current parent */
	current_parent = clk_hw_get_parent(hw);
	parent_rate = clk_hw_get_rate(current_parent);
	best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate);
	best_delta = abs(best_rate - req->rate);

	/* Check whether any other parent clock can produce a better result */
	for (which = 0; which < parent_count; which++) {
		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, which);
		unsigned long delta;
		unsigned long other_rate;

		BUG_ON(!parent);
		if (parent == current_parent)
			continue;

		/* We don't support CLK_SET_RATE_PARENT */
		parent_rate = clk_hw_get_rate(parent);
		other_rate = kona_peri_clk_round_rate(hw, req->rate,
						      &parent_rate);
		delta = abs(other_rate - req->rate);
		if (delta < best_delta) {
			best_delta = delta;
			best_rate = other_rate;
			req->best_parent_hw = parent;
			req->best_parent_rate = parent_rate;
		}
	}

	req->rate = best_rate;
	return 0;
}

static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_sel *sel = &data->sel;
	struct bcm_clk_trig *trig;
	int ret;

	BUG_ON(index >= sel->parent_count);

	/* If there's only one parent we don't require a selector */
	if (!selector_exists(sel))
		return 0;

	/*
	 * The regular trigger is used by default, but if there's a
	 * pre-trigger we want to use that instead.
	 */
	trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
					       : &data->trig;

	ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: %strigger failed for %s\n", __func__,
			trig == &data->pre_trig ? "pre-" : "",
			bcm_clk->init_data.name);
	}

	return ret;
}

static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	u8 index;

	index = selector_read_index(bcm_clk->ccu, &data->sel);

	/* Not all callers would handle an out-of-range value gracefully */
	return index == BAD_CLK_INDEX ? 0 : index;
}

static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_div *div = &data->div;
	u64 scaled_div = 0;
	int ret;

	if (parent_rate > (unsigned long)LONG_MAX)
		return -EINVAL;

	if (rate == clk_hw_get_rate(hw))
		return 0;

	if (!divider_exists(div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * A fixed divider can't be changed.  (Nor can a fixed
	 * pre-divider be, but for now we never actually try to
	 * change that.)  Tolerate a request for a no-op change.
	 */
	if (divider_is_fixed(&data->div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * Get the scaled divisor value needed to achieve a clock
	 * rate as close as possible to what was requested, given
	 * the parent clock rate supplied.
	 */
	(void)round_rate(bcm_clk->ccu, div, &data->pre_div,
				rate ? rate : 1, parent_rate, &scaled_div);

	/*
	 * We aren't updating any pre-divider at this point, so
	 * we'll use the regular trigger.
	 */
	ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
				&data->trig, scaled_div);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: trigger failed for %s\n", __func__,
			bcm_clk->init_data.name);
	}

	return ret;
}

struct clk_ops kona_peri_clk_ops = {
	.enable = kona_peri_clk_enable,
	.disable = kona_peri_clk_disable,
	.is_enabled = kona_peri_clk_is_enabled,
	.recalc_rate = kona_peri_clk_recalc_rate,
	.determine_rate = kona_peri_clk_determine_rate,
	.set_parent = kona_peri_clk_set_parent,
	.get_parent = kona_peri_clk_get_parent,
	.set_rate = kona_peri_clk_set_rate,
};

/* Put a peripheral clock into its initial state */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
	struct ccu_data *ccu = bcm_clk->ccu;
	struct peri_clk_data *peri = bcm_clk->u.peri;
	const char *name = bcm_clk->init_data.name;
	struct bcm_clk_trig *trig;

	BUG_ON(bcm_clk->type != bcm_clk_peri);

	if (!policy_init(ccu, &peri->policy)) {
		pr_err("%s: error initializing policy for %s\n",
			__func__, name);
		return false;
	}
	if (!gate_init(ccu, &peri->gate)) {
		pr_err("%s: error initializing gate for %s\n", __func__, name);
		return false;
	}
	if (!hyst_init(ccu, &peri->hyst)) {
		pr_err("%s: error initializing hyst for %s\n", __func__, name);
		return false;
	}
	if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
		pr_err("%s: error initializing divider for %s\n", __func__,
			name);
		return false;
	}

	/*
	 * For the pre-divider and selector, the pre-trigger is used
	 * if it's present, otherwise we just use the regular trigger.
	 */
	trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
					       : &peri->trig;

	if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
		pr_err("%s: error initializing pre-divider for %s\n", __func__,
			name);
		return false;
	}

	if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
		pr_err("%s: error initializing selector for %s\n", __func__,
			name);
		return false;
	}

	return true;
}

static bool __kona_clk_init(struct kona_clk *bcm_clk)
{
	switch (bcm_clk->type) {
	case bcm_clk_peri:
		return __peri_clk_init(bcm_clk);
	default:
		BUG();
	}
	return false;
}

/* Set a CCU and all its clocks into their desired initial state */
bool __init kona_ccu_init(struct ccu_data *ccu)
{
	unsigned long flags;
	unsigned int which;
	struct kona_clk *kona_clks = ccu->kona_clks;
	bool success = true;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	for (which = 0; which < ccu->clk_num; which++) {
		struct kona_clk *bcm_clk = &kona_clks[which];

		if (!bcm_clk->ccu)
			continue;

		success &= __kona_clk_init(bcm_clk);
	}

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);
	return success;
}