Linux Audio

Check our new training course

Loading...
v6.8
  1/*
  2 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
  3 *
  4 *  Copyright (C) 2010  Magnus Damm
  5 *  Copyright (C) 2010 - 2012  Paul Mundt
  6 *
  7 * This file is subject to the terms and conditions of the GNU General Public
  8 * License.  See the file "COPYING" in the main directory of this archive
  9 * for more details.
 10 */
 11#include <linux/clk.h>
 12#include <linux/compiler.h>
 13#include <linux/slab.h>
 14#include <linux/io.h>
 15#include <linux/sh_clk.h>
 16
 17#define CPG_CKSTP_BIT	BIT(8)
 18
 19static unsigned int sh_clk_read(struct clk *clk)
 20{
 21	if (clk->flags & CLK_ENABLE_REG_8BIT)
 22		return ioread8(clk->mapped_reg);
 23	else if (clk->flags & CLK_ENABLE_REG_16BIT)
 24		return ioread16(clk->mapped_reg);
 25
 26	return ioread32(clk->mapped_reg);
 27}
 28
 29static void sh_clk_write(int value, struct clk *clk)
 30{
 31	if (clk->flags & CLK_ENABLE_REG_8BIT)
 32		iowrite8(value, clk->mapped_reg);
 33	else if (clk->flags & CLK_ENABLE_REG_16BIT)
 34		iowrite16(value, clk->mapped_reg);
 35	else
 36		iowrite32(value, clk->mapped_reg);
 37}
 38
/*
 * Enable an MSTP (module stop) gated clock: clear its stop bit, then,
 * if a status register was supplied, poll it until the hardware
 * reports the module as running.
 *
 * Returns 0 on success, -ETIMEDOUT if the status bit does not clear
 * within the bounded busy-wait below.
 */
static int sh_clk_mstp_enable(struct clk *clk)
{
	/* Clearing enable_bit releases the module stop request. */
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		/*
		 * status_reg holds a physical address; translate it into
		 * the already-mapped window by applying the same offset it
		 * has from enable_reg.
		 */
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		/* Use the same access width as the enable register. */
		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = ioread8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = ioread16;
		else
			read = ioread32;

		/* Busy-wait, bounded to 1000 polls. */
		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();
		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}
	return 0;
}
 67
 68static void sh_clk_mstp_disable(struct clk *clk)
 69{
 70	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
 
 71}
 72
/* Common ops for MSTP gate clocks; the rate simply follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
 78
 79int __init sh_clk_mstp_register(struct clk *clks, int nr)
 80{
 81	struct clk *clkp;
 82	int ret = 0;
 83	int k;
 84
 85	for (k = 0; !ret && (k < nr); k++) {
 86		clkp = clks + k;
 87		clkp->ops = &sh_clk_mstp_clk_ops;
 88		ret |= clk_register(clkp);
 89	}
 90
 91	return ret;
 92}
 93
 94/*
 95 * Div/mult table lookup helpers
 96 */
/* Divider clocks stash their struct clk_div_table in clk->priv. */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}
101
/* Shorthand for the div/mult table hanging off clk->priv. */
static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}
106
107/*
108 * Common div ops
109 */
/* Round @rate to a supported rate from the clock's frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
114
/*
 * Recompute the current rate of a divider clock from the divisor
 * field in its control register.
 */
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	/* (Re)build the rate table; pass arch_flags only when set. */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	/* The divisor field doubles as an index into the rate table. */
	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}
127
/*
 * Program the divider so the clock runs at @rate, which must map to
 * an entry in the frequency table.  Returns 0 on success or the
 * negative error from the table lookup.
 */
static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	/* Replace the divisor field with the new table index. */
	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}
149
/*
 * Ungate a divider clock by clearing CKSTP.  div6 clocks must have
 * their divisor reprogrammed first, because sh_clk_div_disable() may
 * have overwritten it with all-ones.
 */
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}
161
/* Gate a divider clock by setting the CKSTP bit. */
static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}
179
/* Divider clocks that cannot be gated: no enable/disable callbacks. */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
185
/* Divider clocks with a CKSTP gate bit. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
193
194static int __init sh_clk_init_parent(struct clk *clk)
195{
196	u32 val;
197
198	if (clk->parent)
199		return 0;
200
201	if (!clk->parent_table || !clk->parent_num)
202		return 0;
203
204	if (!clk->src_width) {
205		pr_err("sh_clk_init_parent: cannot select parent clock\n");
206		return -EINVAL;
207	}
208
209	val  = (sh_clk_read(clk) >> clk->src_shift);
210	val &= (1 << clk->src_width) - 1;
211
212	if (val >= clk->parent_num) {
213		pr_err("sh_clk_init_parent: parent table size failed\n");
214		return -EINVAL;
215	}
216
217	clk_reparent(clk, clk->parent_table[val]);
218	if (!clk->parent) {
219		pr_err("sh_clk_init_parent: unable to set parent");
220		return -EINVAL;
221	}
222
223	return 0;
224}
225
/*
 * Shared registration helper for div4/div6 clocks.  Allocates one
 * frequency table per clock (each with an extra CPUFREQ_TABLE_END
 * sentinel slot), wires up @ops and @table, registers every clock
 * and resolves its initial parent.
 *
 * Returns 0 on success or the first error encountered.
 * NOTE(review): freq_table is not freed on error — tolerable for
 * __init-only code, but worth confirming.
 */
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		/* Point each clock at its slice of the shared allocation. */
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
259
260/*
261 * div6 support
262 */
/* div6 divider field values 0..63 map linearly to divisors 1..64. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
269
/* div6 clocks are pure dividers: no multiplier column is needed. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
274
/* div6 table: no .kick callback, only the div/mult table. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table	= &div6_div_mult_table,
};
278
/*
 * Switch a div6 clock to @parent by programming the source-select
 * field with the parent's index in parent_table, then rebuild the
 * frequency table for the new parent rate.
 *
 * Returns 0 on success, -EINVAL when no parent table exists, -ENODEV
 * when @parent is not in it, or the clk_reparent() error.
 */
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	/* Clear the old source field, then write the new index. */
	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}
311
/* div6 clocks whose parent is runtime-selectable. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
320
/* Register div6 clocks without parent-selection support. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}
326
/* Register div6 clocks that support runtime parent selection. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}
332
333/*
334 * div4 support
335 */
/*
 * Select the div4 parent by flipping bit 7 of the control register;
 * there is no explicit parent index (see the comment below on how
 * internal vs. external parents are distinguished).
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
364
/* div4 clocks whose parent is runtime-selectable. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
373
/* Register ungateable div4 clocks with the given div4 table. */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
379
/* Register gateable div4 clocks (CKSTP enable/disable). */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}
386
/* Register div4 clocks that also support parent selection. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}
393
394/* FSI-DIV */
/*
 * Current FSI-DIV rate: the divisor lives in the upper 16 bits of the
 * register; values below 2 mean no division (parent rate unchanged).
 */
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}
407
/* Constrain requested rates to divisors in the range 1..0xffff. */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}
412
/* Writing 0 clears both the divisor field and the low control bits. */
static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}
417
/*
 * Re-enable FSI-DIV with the previously programmed divisor.  When the
 * divisor field is below 2 the divider is unused and nothing is written.
 */
static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value  = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	/* NOTE(review): 0x3 presumably sets the enable bits — confirm. */
	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}
 
 
 
 
 
 
430
/*
 * Program the FSI divisor for @rate.  A computed divisor below 2
 * writes 0 (divider cleared); otherwise only the divisor field is
 * written, leaving the clock gated until fsidiv_enable().
 * NOTE(review): assumes @rate != 0, otherwise the division faults —
 * verify callers.
 */
static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}
 
 
 
 
443
/* Ops shared by all FSI-DIV clocks. */
static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};
451
/*
 * Register FSI-DIV clocks.  Each clock's enable_reg (seeded by
 * SH_CLK_FSIDIV()) is converted into a clk_mapping covering the
 * 8-byte register window, then the clock is registered with
 * fsidiv_clk_ops.
 *
 * Returns -ENOMEM on allocation failure (earlier clocks stay
 * registered and their maps are not freed), otherwise 0; any
 * clk_register() error is ignored.
 */
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys		= (phys_addr_t)clks[i].enable_reg;
		map->len		= 8;

		clks[i].enable_reg	= 0; /* remove .enable_reg */
		clks[i].ops		= &fsidiv_clk_ops;
		clks[i].mapping		= map;

		clk_register(&clks[i]);
	}

	return 0;
}
v3.1
  1/*
  2 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
  3 *
  4 *  Copyright (C) 2010  Magnus Damm
 
  5 *
  6 * This file is subject to the terms and conditions of the GNU General Public
  7 * License.  See the file "COPYING" in the main directory of this archive
  8 * for more details.
  9 */
 10#include <linux/clk.h>
 11#include <linux/compiler.h>
 12#include <linux/slab.h>
 13#include <linux/io.h>
 14#include <linux/sh_clk.h>
 15
/* Enable an MSTP32 gate clock by clearing its module stop bit. */
static int sh_clk_mstp32_enable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
		     clk->enable_reg);
	return 0;
}
 22
/* Gate an MSTP32 clock by setting its module stop bit. */
static void sh_clk_mstp32_disable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
		     clk->enable_reg);
}
 28
/* Common ops for MSTP32 gate clocks; the rate follows the parent. */
static struct clk_ops sh_clk_mstp32_clk_ops = {
	.enable		= sh_clk_mstp32_enable,
	.disable	= sh_clk_mstp32_disable,
	.recalc		= followparent_recalc,
};
 34
/*
 * Register an array of MSTP32 gate clocks; stops at the first
 * clk_register() failure and returns its error (0 on success).
 */
int __init sh_clk_mstp32_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp32_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}
 49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Round @rate to a supported rate from the clock's frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
 54
/* div6 divider field values 0..63 map linearly to divisors 1..64. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
 61
/* Pure dividers: no multiplier column is needed. */
static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
 66
/*
 * Current div6 rate: the divisor index is held in the low 6 bits of
 * the enable register.
 */
static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	idx = __raw_readl(clk->enable_reg) & 0x003f;

	return clk->freq_table[idx].frequency;
}
 79
/*
 * Switch a div6 clock to @parent by programming the source-select
 * field with the parent's index in parent_table, then rebuild the
 * frequency table.  Returns 0, -EINVAL (no parent table), -ENODEV
 * (@parent not found), or the clk_reparent() error.
 */
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	/* Clear the old source field, then write the new index. */
	value = __raw_readl(clk->enable_reg) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	__raw_writel(value | (i << clk->src_shift), clk->enable_reg);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}
112
/*
 * Program the low 6-bit divisor field for @rate.  Returns 0 or the
 * negative error from the frequency-table lookup.
 */
static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = __raw_readl(clk->enable_reg);
	value &= ~0x3f;
	value |= idx;
	__raw_writel(value, clk->enable_reg);
	return 0;
}
128
/*
 * Restore the divisor for clk->rate (disable overwrites it), then
 * clear the stop bit to start the clock.
 */
static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate);
	if (ret == 0) {
		value = __raw_readl(clk->enable_reg);
		value &= ~0x100; /* clear stop bit to enable clock */
		__raw_writel(value, clk->enable_reg);
	}
	return ret;
}
142
/* Gate a div6 clock; the divisor field must stay non-zero. */
static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = __raw_readl(clk->enable_reg);
	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	__raw_writel(value, clk->enable_reg);
}
152
/* div6 clocks without parent selection. */
static struct clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};
160
/* div6 clocks whose parent is runtime-selectable. */
static struct clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
169
/*
 * Common div6 registration: allocate one frequency table per clock
 * (each with an extra CPUFREQ_TABLE_END sentinel slot), attach @ops
 * and register.  Returns the first clk_register() error or 0.
 * NOTE(review): freq_table is not freed on error; __init-only code.
 */
static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
					   struct clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = sh_clk_div6_table.nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		/* Point each clock at its slice of the shared allocation. */
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}
199
/* Register div6 clocks without parent-selection support. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}
204
/* Register div6 clocks that support runtime parent selection. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
210
/*
 * Current div4 rate: a 4-bit divisor index at enable_bit in the
 * enable register selects the frequency-table entry.
 */
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}
224
 
 
 
/*
 * Select the div4 parent by flipping bit 7 of the enable register;
 * there is no explicit parent index (see the comment below).
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
	else
		value = __raw_readl(clk->enable_reg) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	__raw_writel(value, clk->enable_reg);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
254
/*
 * Program the 4-bit divisor field for @rate, then run the optional
 * board "kick" hook to latch the change.  Returns 0 or the negative
 * error from the frequency-table lookup.
 */
static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div4_table *d4t = clk->priv;
	unsigned long value;
	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = __raw_readl(clk->enable_reg);
	value &= ~(0xf << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	__raw_writel(value, clk->enable_reg);

	if (d4t->kick)
		d4t->kick(clk);

	return 0;
}
273
/* Ungate a div4 clock by clearing the stop bit (bit 8). */
static int sh_clk_div4_enable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
	return 0;
}
279
/* Gate a div4 clock by setting the stop bit (bit 8). */
static void sh_clk_div4_disable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
}
284
/* div4 clocks that cannot be gated. */
static struct clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
 
 
290
/* div4 clocks with a stop-bit gate. */
static struct clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};
298
/* div4 clocks whose parent is runtime-selectable. */
static struct clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
307
/*
 * Common div4 registration: allocate one frequency table per clock
 * (each with an extra CPUFREQ_TABLE_END sentinel slot), attach @ops
 * and @table, and register.  Returns the first clk_register() error
 * or 0.  NOTE(review): freq_table is not freed on error; __init-only.
 */
static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
			struct clk_div4_table *table, struct clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		/* Point each clock at its slice of the shared allocation. */
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}
 
339
/* Register ungateable div4 clocks with the given div4 table. */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}
345
/* Register gateable div4 clocks (stop-bit enable/disable). */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}
352
/* Register div4 clocks that also support parent selection. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}