1/*
2 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3 *
4 * Copyright (C) 2010 Magnus Damm
5 * Copyright (C) 2010 - 2012 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/clk.h>
12#include <linux/compiler.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/sh_clk.h>
16
17#define CPG_CKSTP_BIT BIT(8)
18
19static unsigned int sh_clk_read(struct clk *clk)
20{
21 if (clk->flags & CLK_ENABLE_REG_8BIT)
22 return ioread8(clk->mapped_reg);
23 else if (clk->flags & CLK_ENABLE_REG_16BIT)
24 return ioread16(clk->mapped_reg);
25
26 return ioread32(clk->mapped_reg);
27}
28
29static void sh_clk_write(int value, struct clk *clk)
30{
31 if (clk->flags & CLK_ENABLE_REG_8BIT)
32 iowrite8(value, clk->mapped_reg);
33 else if (clk->flags & CLK_ENABLE_REG_16BIT)
34 iowrite16(value, clk->mapped_reg);
35 else
36 iowrite32(value, clk->mapped_reg);
37}
38
/* 8-bit MMIO read wrapper, used as a function pointer in sh_clk_mstp_enable(). */
static unsigned int r8(const void __iomem *addr)
{
	return ioread8(addr);
}
43
/* 16-bit MMIO read wrapper, used as a function pointer in sh_clk_mstp_enable(). */
static unsigned int r16(const void __iomem *addr)
{
	return ioread16(addr);
}
48
/* 32-bit MMIO read wrapper, used as a function pointer in sh_clk_mstp_enable(). */
static unsigned int r32(const void __iomem *addr)
{
	return ioread32(addr);
}
53
/*
 * Enable an MSTP (module stop) gate clock by clearing its stop bit.
 *
 * If a status register is supplied, busy-wait (bounded) until the
 * hardware reports the clock running.  Returns 0 on success or
 * -ETIMEDOUT if the status bit never clears.
 */
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		/*
		 * Locate the status register inside the already-mapped
		 * window by reusing its physical offset from enable_reg.
		 */
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		/* Pick the accessor matching the register width flags. */
		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = r8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = r16;
		else
			read = r32;

		/* Poll up to 1000 iterations for the stop bit to clear. */
		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();
		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}
	return 0;
}
82
83static void sh_clk_mstp_disable(struct clk *clk)
84{
85 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
86}
87
/* MSTP gates are pure on/off switches; the rate follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable = sh_clk_mstp_enable,
	.disable = sh_clk_mstp_disable,
	.recalc = followparent_recalc,
};
93
94int __init sh_clk_mstp_register(struct clk *clks, int nr)
95{
96 struct clk *clkp;
97 int ret = 0;
98 int k;
99
100 for (k = 0; !ret && (k < nr); k++) {
101 clkp = clks + k;
102 clkp->ops = &sh_clk_mstp_clk_ops;
103 ret |= clk_register(clkp);
104 }
105
106 return ret;
107}
108
109/*
110 * Div/mult table lookup helpers
111 */
/* The per-clock divider table is stashed in the clock's private data. */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}
116
/* Convenience accessor for the div/mult table inside the divider table. */
static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}
121
122/*
123 * Common div ops
124 */
/* Round @rate to the closest entry in the clock's frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
129
/*
 * Recalculate the current rate of a divider clock: rebuild the frequency
 * table, then look up the divisor index currently programmed in the
 * register field described by enable_bit/div_mask.
 */
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}
142
/*
 * Program the divisor field for @rate.  The frequency-table index found
 * by clk_rate_table_find() doubles as the hardware divisor value.
 * Returns 0 on success or a negative error if @rate has no table entry.
 */
static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	/* Read-modify-write just the divisor field. */
	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}
164
/*
 * Enable a divider clock by clearing the CKSTP bit.  div6 clocks first
 * reprogram the divisor, since disable may have overwritten it (see
 * sh_clk_div_disable()).
 */
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}
176
/* Stop a divider clock by setting the CKSTP bit. */
static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}
194
/* Divider clocks that are always running: rate control only. */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
};

/* Divider clocks that can also be gated via the CKSTP bit. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
};
208
209static int __init sh_clk_init_parent(struct clk *clk)
210{
211 u32 val;
212
213 if (clk->parent)
214 return 0;
215
216 if (!clk->parent_table || !clk->parent_num)
217 return 0;
218
219 if (!clk->src_width) {
220 pr_err("sh_clk_init_parent: cannot select parent clock\n");
221 return -EINVAL;
222 }
223
224 val = (sh_clk_read(clk) >> clk->src_shift);
225 val &= (1 << clk->src_width) - 1;
226
227 if (val >= clk->parent_num) {
228 pr_err("sh_clk_init_parent: parent table size failed\n");
229 return -EINVAL;
230 }
231
232 clk_reparent(clk, clk->parent_table[val]);
233 if (!clk->parent) {
234 pr_err("sh_clk_init_parent: unable to set parent");
235 return -EINVAL;
236 }
237
238 return 0;
239}
240
/*
 * Common registration helper for divider clocks: allocates one frequency
 * table per clock (each with room for a CPUFREQ_TABLE_END terminator),
 * attaches @ops/@table, registers the clock, and resolves its parent.
 *
 * NOTE(review): freq_table is not freed on failure; presumably acceptable
 * because these are __init-time registrations that leave already-registered
 * clocks pointing into it — confirm before changing.
 */
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
		struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* One table slice per clock, plus a terminating entry each. */
	freq_table_size *= (nr_divs + 1);
	freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
274
275/*
276 * div6 support
277 */
/* div6 hardware divisor values: field value N selects divide-by-(N+1). */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
284
/* div6 uses pure division (no multipliers), hence no .multipliers. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

/* Shared divider table for all div6 clocks; no kick callback needed. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
293
/*
 * Select a new parent for a div6 clock: find @parent's index in the
 * parent table, reparent in the framework, program the source-select
 * field, and rebuild the frequency table for the new parent rate.
 * Returns 0 on success, -EINVAL/-ENODEV/clk_reparent() error otherwise.
 */
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	/* Clear the source-select field, then write the new index. */
	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}
326
/* div6 clocks with runtime parent selection on top of the common div ops. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.round_rate = sh_clk_div_round_rate,
	.set_rate = sh_clk_div_set_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
	.set_parent = sh_clk_div6_set_parent,
};
335
/* Register div6 clocks without parent selection support. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}
341
/* Register div6 clocks that support runtime parent selection. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}
347
348/*
349 * div4 support
350 */
/*
 * Select the parent of a div4 clock via the external/internal select
 * bit (bit 7), then rebuild the frequency table for the new parent.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
379
/* div4 clocks with runtime parent selection on top of the common div ops. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
	.set_parent = sh_clk_div4_set_parent,
};
388
/* Register always-on div4 clocks (no gating). */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
394
/* Register gateable div4 clocks (CKSTP enable/disable). */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}
401
/* Register div4 clocks that additionally support parent selection. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}
408
409/* FSI-DIV */
/*
 * Current FSI-DIV rate: the divisor lives in the upper 16 bits of the
 * mapped register; values 0 and 1 mean "no division".
 */
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}
422
/* Round to an achievable rate: divisors 1..0xffff of the parent rate. */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}
427
/* Writing 0 clears both the divisor field and the enable bits. */
static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}
432
/*
 * Re-enable FSI-DIV: keep the programmed divisor and set the low
 * enable bits (0x3).  A divisor below 2 means "undivided"; nothing
 * needs enabling in that case.
 */
static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}
445
/*
 * Program the FSI-DIV divisor for @rate (divisor = parent_rate / rate).
 * A computed divisor below 2 disables division entirely.
 *
 * NOTE(review): rate == 0 would divide by zero here — presumably callers
 * always pass a value rounded via fsidiv_round_rate(); confirm.
 */
static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}
458
/* Full op set for FSI-DIV clocks. */
static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc = fsidiv_recalc,
	.round_rate = fsidiv_round_rate,
	.set_rate = fsidiv_set_rate,
	.enable = fsidiv_enable,
	.disable = fsidiv_disable,
};
466
/*
 * Register FSI-DIV clocks.  Each clock gets a clk_mapping built from the
 * physical address smuggled in via .enable_reg (see SH_CLK_FSIDIV()),
 * which is cleared once the mapping owns the address.
 *
 * NOTE(review): the clk_register() return value is ignored, and @map
 * leaks if it fails — presumably tolerated at __init time; confirm
 * before depending on a non-zero return here.
 */
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys = (phys_addr_t)clks[i].enable_reg;
		map->len = 8;

		clks[i].enable_reg = 0; /* remove .enable_reg */
		clks[i].ops = &fsidiv_clk_ops;
		clks[i].mapping = map;

		clk_register(&clks[i]);
	}

	return 0;
}
1/*
2 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3 *
4 * Copyright (C) 2010 Magnus Damm
5 * Copyright (C) 2010 - 2012 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/clk.h>
12#include <linux/compiler.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/sh_clk.h>
16
17static unsigned int sh_clk_read(struct clk *clk)
18{
19 if (clk->flags & CLK_ENABLE_REG_8BIT)
20 return ioread8(clk->mapped_reg);
21 else if (clk->flags & CLK_ENABLE_REG_16BIT)
22 return ioread16(clk->mapped_reg);
23
24 return ioread32(clk->mapped_reg);
25}
26
27static void sh_clk_write(int value, struct clk *clk)
28{
29 if (clk->flags & CLK_ENABLE_REG_8BIT)
30 iowrite8(value, clk->mapped_reg);
31 else if (clk->flags & CLK_ENABLE_REG_16BIT)
32 iowrite16(value, clk->mapped_reg);
33 else
34 iowrite32(value, clk->mapped_reg);
35}
36
37static int sh_clk_mstp_enable(struct clk *clk)
38{
39 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
40 return 0;
41}
42
43static void sh_clk_mstp_disable(struct clk *clk)
44{
45 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
46}
47
/* MSTP gates are pure on/off switches; the rate follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable = sh_clk_mstp_enable,
	.disable = sh_clk_mstp_disable,
	.recalc = followparent_recalc,
};
53
54int __init sh_clk_mstp_register(struct clk *clks, int nr)
55{
56 struct clk *clkp;
57 int ret = 0;
58 int k;
59
60 for (k = 0; !ret && (k < nr); k++) {
61 clkp = clks + k;
62 clkp->ops = &sh_clk_mstp_clk_ops;
63 ret |= clk_register(clkp);
64 }
65
66 return ret;
67}
68
/* Round @rate to the closest entry in the clock's frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
73
/* div6 hardware divisor values: field value N selects divide-by-(N+1). */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
80
/* div6 uses pure division (no multipliers), hence no .multipliers. */
static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
85
/*
 * Recalculate a div6 clock's rate: rebuild the frequency table, then
 * index it with the 6-bit divisor field from the register.
 */
static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	idx = sh_clk_read(clk) & 0x003f;

	return clk->freq_table[idx].frequency;
}
98
/*
 * Select a new parent for a div6 clock: find @parent's index in the
 * parent table, reparent in the framework, program the source-select
 * field, and rebuild the frequency table for the new parent rate.
 * Returns 0 on success, -EINVAL/-ENODEV/clk_reparent() error otherwise.
 */
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	/* Clear the source-select field, then write the new index. */
	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}
131
/*
 * Program the 6-bit divisor field for @rate; the frequency-table index
 * doubles as the hardware divisor value.  Returns a negative error if
 * @rate has no table entry.
 */
static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~0x3f;
	value |= idx;
	sh_clk_write(value, clk);
	return 0;
}
147
/*
 * Enable a div6 clock: first reprogram the divisor (disable overwrote
 * it — see sh_clk_div6_disable()), then clear the stop bit.
 */
static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate);
	if (ret == 0) {
		value = sh_clk_read(clk);
		value &= ~0x100; /* clear stop bit to enable clock */
		sh_clk_write(value, clk);
	}
	return ret;
}
161
/* Stop a div6 clock; the divisor field must stay non-zero while stopped. */
static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = sh_clk_read(clk);
	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	sh_clk_write(value, clk);
}
171
/* div6 clocks without parent selection. */
static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc = sh_clk_div6_recalc,
	.round_rate = sh_clk_div_round_rate,
	.set_rate = sh_clk_div6_set_rate,
	.enable = sh_clk_div6_enable,
	.disable = sh_clk_div6_disable,
};

/* div6 clocks with runtime parent selection. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc = sh_clk_div6_recalc,
	.round_rate = sh_clk_div_round_rate,
	.set_rate = sh_clk_div6_set_rate,
	.enable = sh_clk_div6_enable,
	.disable = sh_clk_div6_disable,
	.set_parent = sh_clk_div6_set_parent,
};
188
189static int __init sh_clk_init_parent(struct clk *clk)
190{
191 u32 val;
192
193 if (clk->parent)
194 return 0;
195
196 if (!clk->parent_table || !clk->parent_num)
197 return 0;
198
199 if (!clk->src_width) {
200 pr_err("sh_clk_init_parent: cannot select parent clock\n");
201 return -EINVAL;
202 }
203
204 val = (sh_clk_read(clk) >> clk->src_shift);
205 val &= (1 << clk->src_width) - 1;
206
207 if (val >= clk->parent_num) {
208 pr_err("sh_clk_init_parent: parent table size failed\n");
209 return -EINVAL;
210 }
211
212 clk_reparent(clk, clk->parent_table[val]);
213 if (!clk->parent) {
214 pr_err("sh_clk_init_parent: unable to set parent");
215 return -EINVAL;
216 }
217
218 return 0;
219}
220
221static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
222 struct sh_clk_ops *ops)
223{
224 struct clk *clkp;
225 void *freq_table;
226 int nr_divs = sh_clk_div6_table.nr_divisors;
227 int freq_table_size = sizeof(struct cpufreq_frequency_table);
228 int ret = 0;
229 int k;
230
231 freq_table_size *= (nr_divs + 1);
232 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
233 if (!freq_table) {
234 pr_err("sh_clk_div6_register: unable to alloc memory\n");
235 return -ENOMEM;
236 }
237
238 for (k = 0; !ret && (k < nr); k++) {
239 clkp = clks + k;
240
241 clkp->ops = ops;
242 clkp->freq_table = freq_table + (k * freq_table_size);
243 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
244 ret = clk_register(clkp);
245 if (ret < 0)
246 break;
247
248 ret = sh_clk_init_parent(clkp);
249 }
250
251 return ret;
252}
253
/* Register div6 clocks without parent selection support. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}
258
/* Register div6 clocks that support runtime parent selection. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
264
/*
 * Recalculate a div4 clock's rate: rebuild the frequency table, then
 * index it with the 4-bit divisor field at enable_bit.
 */
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}
278
/*
 * Select the parent of a div4 clock via the external/internal select
 * bit (bit 7), then rebuild the frequency table for the new parent.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
308
/*
 * Program the 4-bit divisor field for @rate, then invoke the optional
 * per-SoC kick() callback to latch the new divider configuration.
 */
static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div4_table *d4t = clk->priv;
	unsigned long value;
	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(0xf << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	if (d4t->kick)
		d4t->kick(clk);

	return 0;
}
327
/* Start a div4 clock by clearing the CKSTP bit (bit 8); cannot fail. */
static int sh_clk_div4_enable(struct clk *clk)
{
	unsigned int value = sh_clk_read(clk);

	sh_clk_write(value & ~(1 << 8), clk);
	return 0;
}
333
/* Stop a div4 clock by setting the CKSTP bit (bit 8). */
static void sh_clk_div4_disable(struct clk *clk)
{
	unsigned int value = sh_clk_read(clk);

	sh_clk_write(value | (1 << 8), clk);
}
338
/* Always-running div4 clocks: rate control only. */
static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.set_rate = sh_clk_div4_set_rate,
	.round_rate = sh_clk_div_round_rate,
};

/* Gateable div4 clocks (CKSTP enable/disable). */
static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.set_rate = sh_clk_div4_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div4_enable,
	.disable = sh_clk_div4_disable,
};

/* Gateable div4 clocks with runtime parent selection. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.set_rate = sh_clk_div4_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div4_enable,
	.disable = sh_clk_div4_disable,
	.set_parent = sh_clk_div4_set_parent,
};
361
362static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
363 struct clk_div4_table *table, struct sh_clk_ops *ops)
364{
365 struct clk *clkp;
366 void *freq_table;
367 int nr_divs = table->div_mult_table->nr_divisors;
368 int freq_table_size = sizeof(struct cpufreq_frequency_table);
369 int ret = 0;
370 int k;
371
372 freq_table_size *= (nr_divs + 1);
373 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
374 if (!freq_table) {
375 pr_err("sh_clk_div4_register: unable to alloc memory\n");
376 return -ENOMEM;
377 }
378
379 for (k = 0; !ret && (k < nr); k++) {
380 clkp = clks + k;
381
382 clkp->ops = ops;
383 clkp->priv = table;
384
385 clkp->freq_table = freq_table + (k * freq_table_size);
386 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
387
388 ret = clk_register(clkp);
389 }
390
391 return ret;
392}
393
/* Register always-on div4 clocks (no gating). */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}
399
/* Register gateable div4 clocks (CKSTP enable/disable). */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}
406
/* Register div4 clocks that additionally support parent selection. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}