Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright 2012 Freescale Semiconductor, Inc.
4 * Copyright 2012 Linaro Ltd.
5 */
6
7#include <linux/bits.h>
8#include <linux/clk.h>
9#include <linux/clk-provider.h>
10#include <linux/io.h>
11#include <linux/slab.h>
12#include <linux/jiffies.h>
13#include <linux/err.h>
14#include "clk.h"
15
16static int clk_busy_wait(void __iomem *reg, u8 shift)
17{
18 unsigned long timeout = jiffies + msecs_to_jiffies(10);
19
20 while (readl_relaxed(reg) & (1 << shift))
21 if (time_after(jiffies, timeout))
22 return -ETIMEDOUT;
23
24 return 0;
25}
26
/*
 * A divider whose register writes take effect only after the hardware
 * clears a separate busy flag; set_rate must poll that flag.
 */
struct clk_busy_divider {
	struct clk_divider div;		/* embedded generic divider */
	const struct clk_ops *div_ops;	/* ops of the generic divider */
	void __iomem *reg;		/* register holding the busy flag */
	u8 shift;			/* bit position of the busy flag */
};
33
34static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
35{
36 struct clk_divider *div = to_clk_divider(hw);
37
38 return container_of(div, struct clk_busy_divider, div);
39}
40
41static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
42 unsigned long parent_rate)
43{
44 struct clk_busy_divider *busy = to_clk_busy_divider(hw);
45
46 return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
47}
48
49static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
50 unsigned long *prate)
51{
52 struct clk_busy_divider *busy = to_clk_busy_divider(hw);
53
54 return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
55}
56
57static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
58 unsigned long parent_rate)
59{
60 struct clk_busy_divider *busy = to_clk_busy_divider(hw);
61 int ret;
62
63 ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
64 if (!ret)
65 ret = clk_busy_wait(busy->reg, busy->shift);
66
67 return ret;
68}
69
/*
 * Divider ops: recalc/round defer to the generic divider; set_rate
 * additionally polls the busy flag after programming the register.
 */
static const struct clk_ops clk_busy_divider_ops = {
	.recalc_rate = clk_busy_divider_recalc_rate,
	.round_rate = clk_busy_divider_round_rate,
	.set_rate = clk_busy_divider_set_rate,
};
75
76struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
77 void __iomem *reg, u8 shift, u8 width,
78 void __iomem *busy_reg, u8 busy_shift)
79{
80 struct clk_busy_divider *busy;
81 struct clk_hw *hw;
82 struct clk_init_data init;
83 int ret;
84
85 busy = kzalloc(sizeof(*busy), GFP_KERNEL);
86 if (!busy)
87 return ERR_PTR(-ENOMEM);
88
89 busy->reg = busy_reg;
90 busy->shift = busy_shift;
91
92 busy->div.reg = reg;
93 busy->div.shift = shift;
94 busy->div.width = width;
95 busy->div.lock = &imx_ccm_lock;
96 busy->div_ops = &clk_divider_ops;
97
98 init.name = name;
99 init.ops = &clk_busy_divider_ops;
100 init.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL;
101 init.parent_names = &parent_name;
102 init.num_parents = 1;
103
104 busy->div.hw.init = &init;
105
106 hw = &busy->div.hw;
107
108 ret = clk_hw_register(NULL, hw);
109 if (ret) {
110 kfree(busy);
111 return ERR_PTR(ret);
112 }
113
114 return hw;
115}
116
/*
 * A mux whose parent switch takes effect only after the hardware
 * clears a separate busy flag; set_parent must poll that flag.
 */
struct clk_busy_mux {
	struct clk_mux mux;		/* embedded generic mux */
	const struct clk_ops *mux_ops;	/* ops of the generic mux */
	void __iomem *reg;		/* register holding the busy flag */
	u8 shift;			/* bit position of the busy flag */
};
123
124static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
125{
126 struct clk_mux *mux = to_clk_mux(hw);
127
128 return container_of(mux, struct clk_busy_mux, mux);
129}
130
131static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
132{
133 struct clk_busy_mux *busy = to_clk_busy_mux(hw);
134
135 return busy->mux_ops->get_parent(&busy->mux.hw);
136}
137
138static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
139{
140 struct clk_busy_mux *busy = to_clk_busy_mux(hw);
141 int ret;
142
143 ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
144 if (!ret)
145 ret = clk_busy_wait(busy->reg, busy->shift);
146
147 return ret;
148}
149
/*
 * Mux ops: parent reads defer to the generic mux, parent changes poll
 * the busy flag, and rate requests never trigger a reparent.
 */
static const struct clk_ops clk_busy_mux_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_busy_mux_get_parent,
	.set_parent = clk_busy_mux_set_parent,
};
155
156struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift,
157 u8 width, void __iomem *busy_reg, u8 busy_shift,
158 const char * const *parent_names, int num_parents)
159{
160 struct clk_busy_mux *busy;
161 struct clk_hw *hw;
162 struct clk_init_data init;
163 int ret;
164
165 busy = kzalloc(sizeof(*busy), GFP_KERNEL);
166 if (!busy)
167 return ERR_PTR(-ENOMEM);
168
169 busy->reg = busy_reg;
170 busy->shift = busy_shift;
171
172 busy->mux.reg = reg;
173 busy->mux.shift = shift;
174 busy->mux.mask = BIT(width) - 1;
175 busy->mux.lock = &imx_ccm_lock;
176 busy->mux_ops = &clk_mux_ops;
177
178 init.name = name;
179 init.ops = &clk_busy_mux_ops;
180 init.flags = CLK_IS_CRITICAL;
181 init.parent_names = parent_names;
182 init.num_parents = num_parents;
183
184 busy->mux.hw.init = &init;
185
186 hw = &busy->mux.hw;
187
188 ret = clk_hw_register(NULL, hw);
189 if (ret) {
190 kfree(busy);
191 return ERR_PTR(ret);
192 }
193
194 return hw;
195}
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 * Copyright 2012 Linaro Ltd.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/clk.h>
14#include <linux/clk-provider.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/jiffies.h>
18#include <linux/err.h>
19#include "clk.h"
20
/*
 * NOTE(review): this and everything below is a stale pre-SPDX copy of the
 * same file appended by the extraction; it duplicates clk_busy_wait()
 * defined earlier and the file cannot compile with both. The whole
 * duplicate section should be deleted.
 *
 * Poll @reg until the busy bit at @shift clears; -ETIMEDOUT after 10ms.
 */
static int clk_busy_wait(void __iomem *reg, u8 shift)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	while (readl_relaxed(reg) & (1 << shift))
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

	return 0;
}
31
/*
 * NOTE(review): duplicate of struct clk_busy_divider defined earlier in
 * this file (stale appended copy) — should be deleted.
 */
struct clk_busy_divider {
	struct clk_divider div;		/* embedded generic divider */
	const struct clk_ops *div_ops;	/* ops of the generic divider */
	void __iomem *reg;		/* register holding the busy flag */
	u8 shift;			/* bit position of the busy flag */
};
38
/*
 * NOTE(review): duplicate of to_clk_busy_divider() defined earlier in
 * this file (stale appended copy) — should be deleted.
 */
static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_busy_divider, div);
}
45
/*
 * NOTE(review): duplicate of clk_busy_divider_recalc_rate() defined
 * earlier in this file (stale appended copy) — should be deleted.
 */
static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
}
53
/*
 * NOTE(review): duplicate of clk_busy_divider_round_rate() defined
 * earlier in this file (stale appended copy) — should be deleted.
 */
static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *prate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
}
61
/*
 * NOTE(review): duplicate of clk_busy_divider_set_rate() defined earlier
 * in this file (stale appended copy) — should be deleted.
 */
static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);
	int ret;

	ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}
74
/*
 * NOTE(review): duplicate of clk_busy_divider_ops defined earlier in
 * this file (stale appended copy) — should be deleted.
 */
static const struct clk_ops clk_busy_divider_ops = {
	.recalc_rate = clk_busy_divider_recalc_rate,
	.round_rate = clk_busy_divider_round_rate,
	.set_rate = clk_busy_divider_set_rate,
};
80
/*
 * NOTE(review): part of the stale appended copy of this file. This is the
 * older registration helper built on the legacy clk_register()/struct clk
 * API; the current copy earlier in the file uses clk_hw_register() and
 * returns a struct clk_hw instead. Should be deleted along with the rest
 * of the duplicate section.
 */
struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
				 void __iomem *reg, u8 shift, u8 width,
				 void __iomem *busy_reg, u8 busy_shift)
{
	struct clk_busy_divider *busy;
	struct clk *clk;
	struct clk_init_data init;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	/* Busy-flag polling state. */
	busy->reg = busy_reg;
	busy->shift = busy_shift;

	/* Underlying generic divider, protected by the shared CCM lock. */
	busy->div.reg = reg;
	busy->div.shift = shift;
	busy->div.width = width;
	busy->div.lock = &imx_ccm_lock;
	busy->div_ops = &clk_divider_ops;

	init.name = name;
	init.ops = &clk_busy_divider_ops;
	init.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	busy->div.hw.init = &init;

	clk = clk_register(NULL, &busy->div.hw);
	if (IS_ERR(clk))
		kfree(busy);

	return clk;
}
116
/*
 * NOTE(review): duplicate of struct clk_busy_mux defined earlier in this
 * file (stale appended copy) — should be deleted.
 */
struct clk_busy_mux {
	struct clk_mux mux;		/* embedded generic mux */
	const struct clk_ops *mux_ops;	/* ops of the generic mux */
	void __iomem *reg;		/* register holding the busy flag */
	u8 shift;			/* bit position of the busy flag */
};
123
/*
 * NOTE(review): duplicate of to_clk_busy_mux() defined earlier in this
 * file (stale appended copy) — should be deleted.
 */
static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return container_of(mux, struct clk_busy_mux, mux);
}
130
/*
 * NOTE(review): duplicate of clk_busy_mux_get_parent() defined earlier
 * in this file (stale appended copy) — should be deleted.
 */
static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);

	return busy->mux_ops->get_parent(&busy->mux.hw);
}
137
/*
 * NOTE(review): duplicate of clk_busy_mux_set_parent() defined earlier
 * in this file (stale appended copy) — should be deleted.
 */
static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);
	int ret;

	ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}
149
/*
 * NOTE(review): older duplicate of clk_busy_mux_ops from the stale
 * appended copy; unlike the current table earlier in the file it has no
 * .determine_rate hook. Should be deleted.
 */
static const struct clk_ops clk_busy_mux_ops = {
	.get_parent = clk_busy_mux_get_parent,
	.set_parent = clk_busy_mux_set_parent,
};
154
/*
 * NOTE(review): part of the stale appended copy of this file. This is the
 * older registration helper built on the legacy clk_register()/struct clk
 * API; the current copy earlier in the file uses clk_hw_register() and
 * returns a struct clk_hw instead. Should be deleted along with the rest
 * of the duplicate section.
 */
struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
			     u8 width, void __iomem *busy_reg, u8 busy_shift,
			     const char **parent_names, int num_parents)
{
	struct clk_busy_mux *busy;
	struct clk *clk;
	struct clk_init_data init;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	/* Busy-flag polling state. */
	busy->reg = busy_reg;
	busy->shift = busy_shift;

	/* Underlying generic mux, protected by the shared CCM lock. */
	busy->mux.reg = reg;
	busy->mux.shift = shift;
	busy->mux.mask = BIT(width) - 1;
	busy->mux.lock = &imx_ccm_lock;
	busy->mux_ops = &clk_mux_ops;

	init.name = name;
	init.ops = &clk_busy_mux_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	busy->mux.hw.init = &init;

	clk = clk_register(NULL, &busy->mux.hw);
	if (IS_ERR(clk))
		kfree(busy);

	return clk;
}