Loading...
1/*
2 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3 *
4 * Copyright (C) 2010 Magnus Damm
5 * Copyright (C) 2010 - 2012 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/clk.h>
12#include <linux/compiler.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/sh_clk.h>
16
17#define CPG_CKSTP_BIT BIT(8)
18
19static unsigned int sh_clk_read(struct clk *clk)
20{
21 if (clk->flags & CLK_ENABLE_REG_8BIT)
22 return ioread8(clk->mapped_reg);
23 else if (clk->flags & CLK_ENABLE_REG_16BIT)
24 return ioread16(clk->mapped_reg);
25
26 return ioread32(clk->mapped_reg);
27}
28
29static void sh_clk_write(int value, struct clk *clk)
30{
31 if (clk->flags & CLK_ENABLE_REG_8BIT)
32 iowrite8(value, clk->mapped_reg);
33 else if (clk->flags & CLK_ENABLE_REG_16BIT)
34 iowrite16(value, clk->mapped_reg);
35 else
36 iowrite32(value, clk->mapped_reg);
37}
38
/*
 * Clear the module stop bit to ungate the clock.  When a status register
 * is provided, busy-wait until the hardware reports the module running.
 *
 * Returns 0 on success, -ETIMEDOUT if the status bit never clears.
 */
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		/*
		 * Reuse the existing enable_reg mapping: status_reg sits at
		 * the same offset from mapped_reg as it does from enable_reg
		 * in physical address space.
		 */
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		/* Poll at the same register width used for enable_reg. */
		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = ioread8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = ioread16;
		else
			read = ioread32;

		/* Bounded busy-wait (1000 iterations) for the bit to drop. */
		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();
		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}
	return 0;
}
67
68static void sh_clk_mstp_disable(struct clk *clk)
69{
70 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
71}
72
/* MSTP gate clocks: the rate always follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
78
79int __init sh_clk_mstp_register(struct clk *clks, int nr)
80{
81 struct clk *clkp;
82 int ret = 0;
83 int k;
84
85 for (k = 0; !ret && (k < nr); k++) {
86 clkp = clks + k;
87 clkp->ops = &sh_clk_mstp_clk_ops;
88 ret |= clk_register(clkp);
89 }
90
91 return ret;
92}
93
94/*
95 * Div/mult table lookup helpers
96 */
97static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
98{
99 return clk->priv;
100}
101
102static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
103{
104 return clk_to_div_table(clk)->div_mult_table;
105}
106
107/*
108 * Common div ops
109 */
110static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
111{
112 return clk_rate_table_round(clk, clk->freq_table, rate);
113}
114
/*
 * Recompute the clock's rate from hardware: rebuild the frequency table
 * from the divisor table, then index it with the divisor field read
 * back from the control register.
 */
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	/*
	 * NOTE(review): a non-zero arch_flags is passed through to the
	 * table build (div4 clocks use it, see sh_clk_div4_set_parent);
	 * zero means "no extra flags".
	 */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}
127
128static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
129{
130 struct clk_div_table *dt = clk_to_div_table(clk);
131 unsigned long value;
132 int idx;
133
134 idx = clk_rate_table_find(clk, clk->freq_table, rate);
135 if (idx < 0)
136 return idx;
137
138 value = sh_clk_read(clk);
139 value &= ~(clk->div_mask << clk->enable_bit);
140 value |= (idx << clk->enable_bit);
141 sh_clk_write(value, clk);
142
143 /* XXX: Should use a post-change notifier */
144 if (dt->kick)
145 dt->kick(clk);
146
147 return 0;
148}
149
/*
 * Ungate a div clock by clearing the CKSTP bit.
 *
 * div6 clocks need a valid (non-zero) divisor field for the CKSTP
 * toggle to take effect, so reprogram the cached rate first.
 */
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}
161
162static void sh_clk_div_disable(struct clk *clk)
163{
164 unsigned int val;
165
166 val = sh_clk_read(clk);
167 val |= CPG_CKSTP_BIT;
168
169 /*
170 * div6 clocks require the divisor field to be non-zero or the
171 * above CKSTP toggle silently fails. Ensure that the divisor
172 * array is reset to its initial state on disable.
173 */
174 if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
175 val |= clk->div_mask;
176
177 sh_clk_write(val, clk);
178}
179
/* Ops for div clocks without a software-controlled gate. */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

/* Ops for div clocks that can additionally be gated via CKSTP. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
193
194static int __init sh_clk_init_parent(struct clk *clk)
195{
196 u32 val;
197
198 if (clk->parent)
199 return 0;
200
201 if (!clk->parent_table || !clk->parent_num)
202 return 0;
203
204 if (!clk->src_width) {
205 pr_err("sh_clk_init_parent: cannot select parent clock\n");
206 return -EINVAL;
207 }
208
209 val = (sh_clk_read(clk) >> clk->src_shift);
210 val &= (1 << clk->src_width) - 1;
211
212 if (val >= clk->parent_num) {
213 pr_err("sh_clk_init_parent: parent table size failed\n");
214 return -EINVAL;
215 }
216
217 clk_reparent(clk, clk->parent_table[val]);
218 if (!clk->parent) {
219 pr_err("sh_clk_init_parent: unable to set parent");
220 return -EINVAL;
221 }
222
223 return 0;
224}
225
/*
 * Shared registration helper for div4/div6 clocks: allocate one
 * frequency table per clock (nr_divs + 1 entries, the last terminated
 * with CPUFREQ_TABLE_END), attach @ops and @table, register each clock
 * and resolve its initial parent.  Stops at the first failure.
 *
 * NOTE(review): freq_table is never freed on failure; clocks are
 * registered once at init and never torn down, so the leak is benign —
 * confirm if this helper ever gains an unwind path.
 */
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
		struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* One extra slot per clock for the CPUFREQ_TABLE_END sentinel. */
	freq_table_size *= (nr_divs + 1);
	freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
259
260/*
261 * div6 support
262 */
/* div6 hardware encodes divide-by-(n + 1) in a 6-bit field, n = 0..63. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

/* div6 clocks have no post-change kick callback. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
278
/*
 * Switch a div6 clock to @parent: validate it against the parent table,
 * reparent, reprogram the source-select field, and rebuild the
 * frequency table for the new parent rate.
 *
 * Returns 0 on success, -EINVAL when no parent table is configured,
 * -ENODEV when @parent is not one of the listed parents.
 */
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	/* Clear the old source-select field, then insert the new index. */
	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}
311
/* div6 ops with runtime parent switching on top of the gated div ops. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
320
/* Register div6 clocks without parent switching. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}

/* Register div6 clocks that support runtime reparenting. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}
332
333/*
334 * div4 support
335 */
/*
 * Switch a div4 clock between its internal and external parent via
 * control register bit 7, then rebuild the frequency table for the new
 * parent rate.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
364
/* div4 ops with runtime parent switching on top of the gated div ops. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
373
/* Register div4 clocks with no gate support. */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

/* Register div4 clocks that can be gated via CKSTP. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

/* Register div4 clocks with gate and reparent support. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}
393
394/* FSI-DIV */
/*
 * Current rate: the upper 16 bits of the FSI-DIV register hold the
 * divider; values 0 and 1 mean the parent rate passes through
 * unchanged.
 */
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}
407
/* Valid FSI dividers span the full 16-bit field (1..0xffff). */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	const unsigned int div_min = 1, div_max = 0xffff;

	return clk_rate_div_range_round(clk, div_min, div_max, rate);
}
412
/* Clearing the whole register drops both enable bits and the divider. */
static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}
417
418static int fsidiv_enable(struct clk *clk)
419{
420 u32 value;
421
422 value = __raw_readl(clk->mapping->base) >> 16;
423 if (value < 2)
424 return 0;
425
426 __raw_writel((value << 16) | 0x3, clk->mapping->base);
427
428 return 0;
429}
430
431static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
432{
433 int idx;
434
435 idx = (clk->parent->rate / rate) & 0xffff;
436 if (idx < 2)
437 __raw_writel(0, clk->mapping->base);
438 else
439 __raw_writel(idx << 16, clk->mapping->base);
440
441 return 0;
442}
443
/* Full op set for the FSI sample-rate divider clocks. */
static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};
451
452int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
453{
454 struct clk_mapping *map;
455 int i;
456
457 for (i = 0; i < nr; i++) {
458
459 map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
460 if (!map) {
461 pr_err("%s: unable to alloc memory\n", __func__);
462 return -ENOMEM;
463 }
464
465 /* clks[i].enable_reg came from SH_CLK_FSIDIV() */
466 map->phys = (phys_addr_t)clks[i].enable_reg;
467 map->len = 8;
468
469 clks[i].enable_reg = 0; /* remove .enable_reg */
470 clks[i].ops = &fsidiv_clk_ops;
471 clks[i].mapping = map;
472
473 clk_register(&clks[i]);
474 }
475
476 return 0;
477}
1/*
2 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3 *
4 * Copyright (C) 2010 Magnus Damm
5 * Copyright (C) 2010 - 2012 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/clk.h>
12#include <linux/compiler.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/sh_clk.h>
16
17static unsigned int sh_clk_read(struct clk *clk)
18{
19 if (clk->flags & CLK_ENABLE_REG_8BIT)
20 return ioread8(clk->mapped_reg);
21 else if (clk->flags & CLK_ENABLE_REG_16BIT)
22 return ioread16(clk->mapped_reg);
23
24 return ioread32(clk->mapped_reg);
25}
26
27static void sh_clk_write(int value, struct clk *clk)
28{
29 if (clk->flags & CLK_ENABLE_REG_8BIT)
30 iowrite8(value, clk->mapped_reg);
31 else if (clk->flags & CLK_ENABLE_REG_16BIT)
32 iowrite16(value, clk->mapped_reg);
33 else
34 iowrite32(value, clk->mapped_reg);
35}
36
37static int sh_clk_mstp_enable(struct clk *clk)
38{
39 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
40 return 0;
41}
42
43static void sh_clk_mstp_disable(struct clk *clk)
44{
45 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
46}
47
/* MSTP gate clocks: the rate always follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
53
54int __init sh_clk_mstp_register(struct clk *clks, int nr)
55{
56 struct clk *clkp;
57 int ret = 0;
58 int k;
59
60 for (k = 0; !ret && (k < nr); k++) {
61 clkp = clks + k;
62 clkp->ops = &sh_clk_mstp_clk_ops;
63 ret |= clk_register(clkp);
64 }
65
66 return ret;
67}
68
69static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
70{
71 return clk_rate_table_round(clk, clk->freq_table, rate);
72}
73
/* div6 hardware encodes divide-by-(n + 1) in a 6-bit field, n = 0..63. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
85
/*
 * Recompute the rate from hardware: rebuild the frequency table, then
 * index it with the 6-bit divisor field read back from the register.
 */
static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	idx = sh_clk_read(clk) & 0x003f;

	return clk->freq_table[idx].frequency;
}
98
/*
 * Switch a div6 clock to @parent: validate it against the parent table,
 * reparent, reprogram the source-select field, and rebuild the
 * frequency table for the new parent rate.
 *
 * Returns 0 on success, -EINVAL when no parent table is configured,
 * -ENODEV when @parent is not one of the listed parents.
 */
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	/* Clear the old source-select field, then insert the new index. */
	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}
131
132static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
133{
134 unsigned long value;
135 int idx;
136
137 idx = clk_rate_table_find(clk, clk->freq_table, rate);
138 if (idx < 0)
139 return idx;
140
141 value = sh_clk_read(clk);
142 value &= ~0x3f;
143 value |= idx;
144 sh_clk_write(value, clk);
145 return 0;
146}
147
/*
 * Reprogram the cached rate (guaranteeing a valid divisor field) and
 * then clear the stop bit to start the clock.
 */
static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate);
	if (ret == 0) {
		value = sh_clk_read(clk);
		value &= ~0x100; /* clear stop bit to enable clock */
		sh_clk_write(value, clk);
	}
	return ret;
}
161
/*
 * Stop the clock.  The divisor field must stay non-zero for the stop
 * bit to latch, so it is forced to all-ones here.
 */
static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = sh_clk_read(clk);
	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	sh_clk_write(value, clk);
}
171
/* div6 ops without parent switching. */
static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};

/* div6 ops with runtime parent switching. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
188
189static int __init sh_clk_init_parent(struct clk *clk)
190{
191 u32 val;
192
193 if (clk->parent)
194 return 0;
195
196 if (!clk->parent_table || !clk->parent_num)
197 return 0;
198
199 if (!clk->src_width) {
200 pr_err("sh_clk_init_parent: cannot select parent clock\n");
201 return -EINVAL;
202 }
203
204 val = (sh_clk_read(clk) >> clk->src_shift);
205 val &= (1 << clk->src_width) - 1;
206
207 if (val >= clk->parent_num) {
208 pr_err("sh_clk_init_parent: parent table size failed\n");
209 return -EINVAL;
210 }
211
212 clk_reparent(clk, clk->parent_table[val]);
213 if (!clk->parent) {
214 pr_err("sh_clk_init_parent: unable to set parent");
215 return -EINVAL;
216 }
217
218 return 0;
219}
220
221static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
222 struct sh_clk_ops *ops)
223{
224 struct clk *clkp;
225 void *freq_table;
226 int nr_divs = sh_clk_div6_table.nr_divisors;
227 int freq_table_size = sizeof(struct cpufreq_frequency_table);
228 int ret = 0;
229 int k;
230
231 freq_table_size *= (nr_divs + 1);
232 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
233 if (!freq_table) {
234 pr_err("sh_clk_div6_register: unable to alloc memory\n");
235 return -ENOMEM;
236 }
237
238 for (k = 0; !ret && (k < nr); k++) {
239 clkp = clks + k;
240
241 clkp->ops = ops;
242 clkp->freq_table = freq_table + (k * freq_table_size);
243 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
244 ret = clk_register(clkp);
245 if (ret < 0)
246 break;
247
248 ret = sh_clk_init_parent(clkp);
249 }
250
251 return ret;
252}
253
/* Register div6 clocks without parent switching. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}

/* Register div6 clocks that support runtime reparenting. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
264
/*
 * Recompute the rate from hardware: rebuild the frequency table from
 * the board's div4 table, then index it with the 4-bit divisor field
 * read back from the register.
 */
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}
278
/*
 * Switch a div4 clock between its internal and external parent via
 * control register bit 7, then rebuild the frequency table for the new
 * parent rate.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
308
309static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
310{
311 struct clk_div4_table *d4t = clk->priv;
312 unsigned long value;
313 int idx = clk_rate_table_find(clk, clk->freq_table, rate);
314 if (idx < 0)
315 return idx;
316
317 value = sh_clk_read(clk);
318 value &= ~(0xf << clk->enable_bit);
319 value |= (idx << clk->enable_bit);
320 sh_clk_write(value, clk);
321
322 if (d4t->kick)
323 d4t->kick(clk);
324
325 return 0;
326}
327
/* Clear the stop bit (bit 8) to run the clock. */
static int sh_clk_div4_enable(struct clk *clk)
{
	unsigned int val = sh_clk_read(clk);

	sh_clk_write(val & ~(1 << 8), clk);
	return 0;
}

/* Set the stop bit (bit 8) to gate the clock. */
static void sh_clk_div4_disable(struct clk *clk)
{
	unsigned int val = sh_clk_read(clk);

	sh_clk_write(val | (1 << 8), clk);
}
338
/* div4 ops without a software-controlled gate. */
static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

/* div4 ops with gate support. */
static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};

/* div4 ops with gate and runtime parent switching. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
361
362static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
363 struct clk_div4_table *table, struct sh_clk_ops *ops)
364{
365 struct clk *clkp;
366 void *freq_table;
367 int nr_divs = table->div_mult_table->nr_divisors;
368 int freq_table_size = sizeof(struct cpufreq_frequency_table);
369 int ret = 0;
370 int k;
371
372 freq_table_size *= (nr_divs + 1);
373 freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
374 if (!freq_table) {
375 pr_err("sh_clk_div4_register: unable to alloc memory\n");
376 return -ENOMEM;
377 }
378
379 for (k = 0; !ret && (k < nr); k++) {
380 clkp = clks + k;
381
382 clkp->ops = ops;
383 clkp->priv = table;
384
385 clkp->freq_table = freq_table + (k * freq_table_size);
386 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
387
388 ret = clk_register(clkp);
389 }
390
391 return ret;
392}
393
/* Register div4 clocks with no gate support. */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

/* Register div4 clocks that can be gated. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}

/* Register div4 clocks with gate and reparent support. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}