1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
4 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
5 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
6 *
7 * Simple multiplexer clock implementation
8 */
9
10#include <linux/clk-provider.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/io.h>
14#include <linux/err.h>
15
16/*
17 * DOC: basic adjustable multiplexer clock that cannot gate
18 *
19 * Traits of this clock:
20 * prepare - clk_prepare only ensures that parents are prepared
21 * enable - clk_enable only ensures that parents are enabled
22 * rate - rate is only affected by parent switching. No clk_set_rate support
23 * parent - parent is adjustable through clk_set_parent
24 */
25
26static inline u32 clk_mux_readl(struct clk_mux *mux)
27{
28 if (mux->flags & CLK_MUX_BIG_ENDIAN)
29 return ioread32be(mux->reg);
30
31 return readl(mux->reg);
32}
33
34static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
35{
36 if (mux->flags & CLK_MUX_BIG_ENDIAN)
37 iowrite32be(val, mux->reg);
38 else
39 writel(val, mux->reg);
40}
41
42int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
43 unsigned int val)
44{
45 int num_parents = clk_hw_get_num_parents(hw);
46
47 if (table) {
48 int i;
49
50 for (i = 0; i < num_parents; i++)
51 if (table[i] == val)
52 return i;
53 return -EINVAL;
54 }
55
56 if (val && (flags & CLK_MUX_INDEX_BIT))
57 val = ffs(val) - 1;
58
59 if (val && (flags & CLK_MUX_INDEX_ONE))
60 val--;
61
62 if (val >= num_parents)
63 return -EINVAL;
64
65 return val;
66}
67EXPORT_SYMBOL_GPL(clk_mux_val_to_index);
68
69unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
70{
71 unsigned int val = index;
72
73 if (table) {
74 val = table[index];
75 } else {
76 if (flags & CLK_MUX_INDEX_BIT)
77 val = 1 << index;
78
79 if (flags & CLK_MUX_INDEX_ONE)
80 val++;
81 }
82
83 return val;
84}
85EXPORT_SYMBOL_GPL(clk_mux_index_to_val);
86
87static u8 clk_mux_get_parent(struct clk_hw *hw)
88{
89 struct clk_mux *mux = to_clk_mux(hw);
90 u32 val;
91
92 val = clk_mux_readl(mux) >> mux->shift;
93 val &= mux->mask;
94
95 return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
96}
97
/*
 * Select parent @index by writing the corresponding field value into the
 * mux register with a locked read-modify-write.  Always returns 0.
 */
static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;

	/*
	 * Serialize against other users of the same register.  When no lock
	 * was supplied, the __acquire()/__release() annotations keep sparse's
	 * context tracking balanced without generating any code.
	 */
	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		/*
		 * HIWORD registers carry a write-enable mask in the upper 16
		 * bits, so no read-back is needed: the hardware only updates
		 * the bits whose mask is set.
		 */
		reg = mux->mask << (mux->shift + 16);
	} else {
		reg = clk_mux_readl(mux);
		reg &= ~(mux->mask << mux->shift);
	}
	val = val << mux->shift;
	reg |= val;
	clk_mux_writel(mux, reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}
127
128static int clk_mux_determine_rate(struct clk_hw *hw,
129 struct clk_rate_request *req)
130{
131 struct clk_mux *mux = to_clk_mux(hw);
132
133 return clk_mux_determine_rate_flags(hw, req, mux->flags);
134}
135
/*
 * Default mux operations: the parent selection is read from and written to
 * the hardware register, and rate requests are resolved by parent choice.
 */
const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
142
/*
 * Read-only mux operations: no .set_parent, so the parent reported by the
 * hardware cannot be changed through the clock framework.
 */
const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
147
148struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
149 const char * const *parent_names, u8 num_parents,
150 unsigned long flags,
151 void __iomem *reg, u8 shift, u32 mask,
152 u8 clk_mux_flags, u32 *table, spinlock_t *lock)
153{
154 struct clk_mux *mux;
155 struct clk_hw *hw;
156 struct clk_init_data init;
157 u8 width = 0;
158 int ret;
159
160 if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
161 width = fls(mask) - ffs(mask) + 1;
162 if (width + shift > 16) {
163 pr_err("mux value exceeds LOWORD field\n");
164 return ERR_PTR(-EINVAL);
165 }
166 }
167
168 /* allocate the mux */
169 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
170 if (!mux)
171 return ERR_PTR(-ENOMEM);
172
173 init.name = name;
174 if (clk_mux_flags & CLK_MUX_READ_ONLY)
175 init.ops = &clk_mux_ro_ops;
176 else
177 init.ops = &clk_mux_ops;
178 init.flags = flags;
179 init.parent_names = parent_names;
180 init.num_parents = num_parents;
181
182 /* struct clk_mux assignments */
183 mux->reg = reg;
184 mux->shift = shift;
185 mux->mask = mask;
186 mux->flags = clk_mux_flags;
187 mux->lock = lock;
188 mux->table = table;
189 mux->hw.init = &init;
190
191 hw = &mux->hw;
192 ret = clk_hw_register(dev, hw);
193 if (ret) {
194 kfree(mux);
195 hw = ERR_PTR(ret);
196 }
197
198 return hw;
199}
200EXPORT_SYMBOL_GPL(clk_hw_register_mux_table);
201
202struct clk *clk_register_mux_table(struct device *dev, const char *name,
203 const char * const *parent_names, u8 num_parents,
204 unsigned long flags,
205 void __iomem *reg, u8 shift, u32 mask,
206 u8 clk_mux_flags, u32 *table, spinlock_t *lock)
207{
208 struct clk_hw *hw;
209
210 hw = clk_hw_register_mux_table(dev, name, parent_names, num_parents,
211 flags, reg, shift, mask, clk_mux_flags,
212 table, lock);
213 if (IS_ERR(hw))
214 return ERR_CAST(hw);
215 return hw->clk;
216}
217EXPORT_SYMBOL_GPL(clk_register_mux_table);
218
219struct clk *clk_register_mux(struct device *dev, const char *name,
220 const char * const *parent_names, u8 num_parents,
221 unsigned long flags,
222 void __iomem *reg, u8 shift, u8 width,
223 u8 clk_mux_flags, spinlock_t *lock)
224{
225 u32 mask = BIT(width) - 1;
226
227 return clk_register_mux_table(dev, name, parent_names, num_parents,
228 flags, reg, shift, mask, clk_mux_flags,
229 NULL, lock);
230}
231EXPORT_SYMBOL_GPL(clk_register_mux);
232
233struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
234 const char * const *parent_names, u8 num_parents,
235 unsigned long flags,
236 void __iomem *reg, u8 shift, u8 width,
237 u8 clk_mux_flags, spinlock_t *lock)
238{
239 u32 mask = BIT(width) - 1;
240
241 return clk_hw_register_mux_table(dev, name, parent_names, num_parents,
242 flags, reg, shift, mask, clk_mux_flags,
243 NULL, lock);
244}
245EXPORT_SYMBOL_GPL(clk_hw_register_mux);
246
/*
 * Unregister a mux created by clk_register_mux*() and free its memory.
 * The clk_mux pointer is captured before clk_unregister() runs.
 */
void clk_unregister_mux(struct clk *clk)
{
	struct clk_hw *hw = __clk_get_hw(clk);
	struct clk_mux *mux;

	if (!hw)
		return;

	mux = to_clk_mux(hw);
	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);
262
/*
 * Unregister a mux created by clk_hw_register_mux*() and free its memory.
 * The clk_mux pointer is captured before clk_hw_unregister() runs.
 */
void clk_hw_unregister_mux(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);
1/*
2 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
3 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
4 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Simple multiplexer clock implementation
11 */
12
13#include <linux/clk-provider.h>
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/err.h>
18
19/*
20 * DOC: basic adjustable multiplexer clock that cannot gate
21 *
22 * Traits of this clock:
23 * prepare - clk_prepare only ensures that parents are prepared
24 * enable - clk_enable only ensures that parents are enabled
25 * rate - rate is only affected by parent switching. No clk_set_rate support
26 * parent - parent is adjustable through clk_set_parent
27 */
28
/*
 * NOTE(review): this appears to be a stale duplicate of the
 * clk_mux_get_parent() defined earlier in this file — an older revision
 * that still uses clk_readl().  Confirm which copy is intended; both
 * cannot be compiled together.
 *
 * Reads the currently selected parent index from the hardware register.
 */
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 val;

	/*
	 * FIXME need a mux-specific flag to determine if val is bitwise or numeric
	 * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
	 * to 0x7 (index starts at one)
	 * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
	 * val = 0x4 really means "bit 2, index starts at bit 0"
	 */
	val = clk_readl(mux->reg) >> mux->shift;
	val &= mux->mask;

	if (mux->table) {
		int i;

		/* Table lookup: the register value maps to parent index i. */
		for (i = 0; i < num_parents; i++)
			if (mux->table[i] == val)
				return i;
		/*
		 * NOTE(review): -EINVAL is truncated by the u8 return type;
		 * callers cannot distinguish it from a large parent index.
		 */
		return -EINVAL;
	}

	if (val && (mux->flags & CLK_MUX_INDEX_BIT))
		val = ffs(val) - 1;

	if (val && (mux->flags & CLK_MUX_INDEX_ONE))
		val--;

	if (val >= num_parents)
		return -EINVAL;

	return val;
}
65
/*
 * NOTE(review): stale duplicate of the clk_mux_set_parent() defined
 * earlier in this file (older revision using clk_readl()/clk_writel()).
 *
 * Select parent @index by writing the corresponding field value into the
 * mux register with a locked read-modify-write.  Always returns 0.
 */
static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;
	unsigned long flags = 0;

	/* Translate the parent index into the register field value. */
	if (mux->table) {
		index = mux->table[index];
	} else {
		/*
		 * NOTE(review): index is u8, so "1 << index" truncates for
		 * index >= 8 — presumably fine for small muxes; confirm.
		 */
		if (mux->flags & CLK_MUX_INDEX_BIT)
			index = 1 << index;

		if (mux->flags & CLK_MUX_INDEX_ONE)
			index++;
	}

	/*
	 * Serialize against other users of the same register; the sparse
	 * __acquire()/__release() annotations balance context tracking when
	 * no lock was supplied.
	 */
	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		/*
		 * HIWORD registers carry a write-enable mask in the upper 16
		 * bits, so no read-back is needed.
		 */
		val = mux->mask << (mux->shift + 16);
	} else {
		val = clk_readl(mux->reg);
		val &= ~(mux->mask << mux->shift);
	}
	val |= index << mux->shift;
	clk_writel(val, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}
103
/*
 * NOTE(review): stale duplicate of the clk_mux_ops defined earlier in this
 * file; this older revision routes .determine_rate to the core helper
 * __clk_mux_determine_rate() directly.
 */
const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = __clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
110
/*
 * NOTE(review): stale duplicate of the clk_mux_ro_ops defined earlier in
 * this file.  Read-only variant: no .set_parent, so the parent cannot be
 * changed through the clock framework.
 */
const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
115
/*
 * NOTE(review): stale duplicate of clk_register_mux_table() defined earlier
 * in this file — an older revision that registers a struct clk directly
 * (no clk_hw path) and sets the since-removed CLK_IS_BASIC flag.
 *
 * Allocates and registers a table-based mux clock.  Returns the registered
 * clk or an ERR_PTR on failure.
 */
struct clk *clk_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_mux *mux;
	struct clk *clk;
	struct clk_init_data init;
	u8 width = 0;

	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
		/* HIWORD muxes must fit mask and shift in the low 16 bits. */
		width = fls(mask) - ffs(mask) + 1;
		if (width + shift > 16) {
			pr_err("mux value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the mux */
	mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
	if (!mux) {
		pr_err("%s: could not allocate mux clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	if (clk_mux_flags & CLK_MUX_READ_ONLY)
		init.ops = &clk_mux_ro_ops;
	else
		init.ops = &clk_mux_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

	clk = clk_register(dev, &mux->hw);

	/* Registration failed: the core never took ownership, free here. */
	if (IS_ERR(clk))
		kfree(mux);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);
168
/*
 * NOTE(review): stale duplicate of clk_register_mux() defined earlier in
 * this file.  Convenience wrapper: builds the mask from a field @width
 * (no lookup table) and registers via clk_register_mux_table().
 */
struct clk *clk_register_mux(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_mux_flags, spinlock_t *lock)
{
	u32 mask = BIT(width) - 1;

	return clk_register_mux_table(dev, name, parent_names, num_parents,
				      flags, reg, shift, mask, clk_mux_flags,
				      NULL, lock);
}
EXPORT_SYMBOL_GPL(clk_register_mux);
182
/*
 * NOTE(review): stale duplicate of clk_unregister_mux() defined earlier in
 * this file.  Unregisters a mux created by clk_register_mux*() and frees
 * its memory; the clk_mux pointer is captured before clk_unregister().
 */
void clk_unregister_mux(struct clk *clk)
{
	struct clk_mux *mux;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	mux = to_clk_mux(hw);

	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);