// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP APLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * J Keerthy <j-keerthy@ti.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>

#include "clock.h"

#define APLL_FORCE_LOCK 0x1
#define APLL_AUTO_IDLE 0x2
#define MAX_APLL_WAIT_TRIES 1000000

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

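/*
 * dra7_apll_enable() forces the APLL into its locked state: it writes
 * APLL_FORCE_LOCK to the enable field of the control register and then
 * polls the idle-status register, one microsecond per iteration, for
 * the lock indication, giving up after MAX_APLL_WAIT_TRIES attempts.
 */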
static int dra7_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r = 0, i = 0;
	struct dpll_data *ad;
	const char *clk_name;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;
	if (!ad)
		return -EINVAL;

	clk_name = clk_hw_get_name(&clk->hw);

	state <<= __ffs(ad->idlest_mask);

	/* Check if already locked */
	v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);

	if ((v & ad->idlest_mask) == state)
		return r;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);

	state <<= __ffs(ad->idlest_mask);

	while (1) {
		v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
		if ((v & ad->idlest_mask) == state)
			break;
		if (i > MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("clock: %s failed transition to '%s'\n",
			clk_name, (state) ? "locked" : "bypassed");
		r = -EBUSY;
	} else
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

	return r;
}

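/*
 * dra7_apll_disable() does not gate the output directly; it programs
 * APLL_AUTO_IDLE into the enable field so the PLL may idle
 * automatically when unused.
 */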
static void dra7_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;

	state <<= __ffs(ad->idlest_mask);

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

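/*
 * The APLL is reported as enabled unless its enable field reads back
 * as APLL_AUTO_IDLE.
 */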
static int dra7_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u32 v;

	ad = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);

	return v == APLL_AUTO_IDLE ? 0 : 1;
}

static u8 dra7_init_apll_parent(struct clk_hw *hw)
{
	return 0;
}

static const struct clk_ops apll_ck_ops = {
	.enable = &dra7_apll_enable,
	.disable = &dra7_apll_disable,
	.is_enabled = &dra7_apll_is_enabled,
	.get_parent = &dra7_init_apll_parent,
};

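/*
 * omap_clk_register_apll() resolves the reference and bypass parent
 * clocks from the device tree and registers the APLL.  If either
 * parent is not ready yet, registration is deferred through
 * ti_clk_retry_init() and attempted again later.
 */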
static void __init omap_clk_register_apll(void *user,
					  struct device_node *node)
{
	struct clk_hw *hw = user;
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk_hw->dpll_data;
	const char *name;
	struct clk *clk;
	const struct clk_init_data *init = clk_hw->hw.init;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref for %pOFn not ready, retry\n",
			 node);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	ad->clk_ref = __clk_get_hw(clk);

	clk = of_clk_get(node, 1);
	if (IS_ERR(clk)) {
		pr_debug("clk-bypass for %pOFn not ready, retry\n",
			 node);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	ad->clk_bypass = __clk_get_hw(clk);

	name = ti_dt_clk_name(node);
	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init->parent_names);
		kfree(init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(init->parent_names);
	kfree(init);
	kfree(clk_hw);
}

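/*
 * of_dra7_apll_setup() allocates the dpll_data and clk_hw_omap
 * structures for a "ti,dra7-apll-clock" node, fills in the parent
 * names and the control/idlest register addresses, and hands the
 * result to omap_clk_register_apll().
 */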
static void __init of_dra7_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	int ret;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;

	init->name = ti_dt_clk_name(node);
	init->ops = &apll_ck_ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents < 1) {
		pr_err("dra7 apll %pOFn must have parent(s)\n", node);
		goto cleanup;
	}

	parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	of_clk_parent_fill(node, parent_names, init->num_parents);

	init->parent_names = parent_names;

	ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
	ret |= ti_clk_get_reg_addr(node, 1, &ad->idlest_reg);

	if (ret)
		goto cleanup;

	ad->idlest_mask = 0x1;
	ad->enable_mask = 0x3;

	omap_clk_register_apll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(parent_names);
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);

#define OMAP2_EN_APLL_LOCKED 0x3
#define OMAP2_EN_APLL_STOPPED 0x0

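/*
 * On OMAP2 the APLL counts as enabled only when its enable field reads
 * back as OMAP2_EN_APLL_LOCKED.
 */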
static int omap2_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);

	return v == OMAP2_EN_APLL_LOCKED ? 1 : 0;
}

static unsigned long omap2_apll_recalc(struct clk_hw *hw,
				       unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	if (omap2_apll_is_enabled(hw))
		return clk->fixed_rate;

	return 0;
}

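/*
 * omap2_apll_enable() requests the locked state and then polls the
 * idle-status bit, one microsecond per iteration, for up to
 * MAX_APLL_WAIT_TRIES attempts.
 */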
static int omap2_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;
	int i = 0;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);

	while (1) {
		v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
		if (v & ad->idlest_mask)
			break;
		if (i > MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("%s failed to transition to locked\n",
			clk_hw_get_name(&clk->hw));
		return -EBUSY;
	}

	return 0;
}

static void omap2_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

static const struct clk_ops omap2_apll_ops = {
	.enable = &omap2_apll_enable,
	.disable = &omap2_apll_disable,
	.is_enabled = &omap2_apll_is_enabled,
	.recalc_rate = &omap2_apll_recalc,
};

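/*
 * The allow_idle/deny_idle hooks below program the APLL autoidle mode
 * through omap2_apll_set_autoidle(): low-power stop when idling is
 * allowed, autoidle disabled when it is denied.
 */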
static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val)
{
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->autoidle_reg);
	v &= ~ad->autoidle_mask;
	v |= val << __ffs(ad->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

#define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP 0x3
#define OMAP2_APLL_AUTOIDLE_DISABLE 0x0

static void omap2_apll_allow_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP);
}

static void omap2_apll_deny_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_DISABLE);
}

static const struct clk_hw_omap_ops omap2_apll_hwops = {
	.allow_idle = &omap2_apll_allow_idle,
	.deny_idle = &omap2_apll_deny_idle,
};

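/*
 * of_omap2_apll_setup() parses a "ti,omap2-apll-clock" node: the fixed
 * output rate comes from "ti,clock-frequency", the idle-status bit from
 * "ti,idlest-shift", and the enable/autoidle fields from the legacy
 * bit-shift helper, before registering the clock with the framework.
 */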
static void __init of_omap2_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char *name;
	struct clk *clk;
	const char *parent_name;
	u32 val;
	int ret;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);

	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;
	init->ops = &omap2_apll_ops;
	name = ti_dt_clk_name(node);
	init->name = name;
	clk_hw->ops = &omap2_apll_hwops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents != 1) {
		pr_err("%pOFn must have one parent\n", node);
		goto cleanup;
	}

	parent_name = of_clk_get_parent_name(node, 0);
	init->parent_names = &parent_name;

	if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
		pr_err("%pOFn missing clock-frequency\n", node);
		goto cleanup;
	}
	clk_hw->fixed_rate = val;

	clk_hw->enable_bit = ti_clk_get_legacy_bit_shift(node);
	ad->enable_mask = 0x3 << clk_hw->enable_bit;
	ad->autoidle_mask = 0x3 << clk_hw->enable_bit;

	if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
		pr_err("%pOFn missing idlest-shift\n", node);
		goto cleanup;
	}

	ad->idlest_mask = 1 << val;

	ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
	ret |= ti_clk_get_reg_addr(node, 1, &ad->autoidle_reg);
	ret |= ti_clk_get_reg_addr(node, 2, &ad->idlest_reg);

	if (ret)
		goto cleanup;

	name = ti_dt_clk_name(node);
	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init);
		return;
	}
cleanup:
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(omap2_apll_clock, "ti,omap2-apll-clock",
	       of_omap2_apll_setup);