Linux Audio

Check our new training course

Loading...
Note: This file does not exist in kernel version v3.5.6.
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (c) 2014 MundoReader S.L.
  4 * Author: Heiko Stuebner <heiko@sntech.de>
  5 *
  6 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
  7 * Author: Xing Zheng <zhengxing@rock-chips.com>
  8 *
  9 * based on
 10 *
 11 * samsung/clk.c
 12 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 13 * Copyright (c) 2013 Linaro Ltd.
 14 * Author: Thomas Abraham <thomas.ab@samsung.com>
 15 */
 16
 17#include <linux/slab.h>
 18#include <linux/clk.h>
 19#include <linux/clk-provider.h>
 20#include <linux/io.h>
 21#include <linux/mfd/syscon.h>
 22#include <linux/regmap.h>
 23#include <linux/reboot.h>
 24#include <linux/rational.h>
 25#include "clk.h"
 26
/*
 * rockchip_clk_register_branch - register a composite clock branch
 *
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components: a mux is only created when
 * there is more than one parent, a gate only when gate_offset >= 0, and
 * a divider only when div_width > 0.  The pieces are combined via
 * clk_register_composite(), which takes ownership of them on success.
 *
 * Returns the registered clk, or an ERR_PTR() on allocation/registration
 * failure (all partially allocated pieces are freed on the error paths).
 */
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;
	int ret;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		/* read-only muxes report the parent but reject reparenting */
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							: &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			ret = -ENOMEM;
			goto err_gate;
		}

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			ret = -ENOMEM;
			goto err_div;
		}

		div->flags = div_flags;
		/*
		 * NOTE(review): div_offset == 0 means "divider shares
		 * muxdiv_offset", so a divider living at register offset 0
		 * cannot be addressed separately -- confirm no branch needs
		 * that.
		 */
		if (div_offset)
			div->reg = base + div_offset;
		else
			div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_composite;
	}

	return clk;
err_composite:
	kfree(div);
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(ret);
}
123
/*
 * struct rockchip_clk_frac - bookkeeping for a fractional-divider branch
 * and its optional child mux, used by the rate-change notifier below to
 * temporarily park the mux on the fractional parent while its rate changes.
 */
struct rockchip_clk_frac {
	struct notifier_block			clk_nb;		/* rate-change notifier on the frac divider */
	struct clk_fractional_divider		div;
	struct clk_gate				gate;

	struct clk_mux				mux;		/* optional child mux fed by the divider */
	const struct clk_ops			*mux_ops;
	int					mux_frac_idx;	/* parent index of the frac divider in the mux, or <0 */

	bool					rate_change_remuxed;	/* mux was switched away during PRE_RATE_CHANGE */
	int					rate_change_idx;	/* parent index to restore on POST_RATE_CHANGE */
};

/* recover the containing rockchip_clk_frac from its embedded notifier_block */
#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)
139
140static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
141					 unsigned long event, void *data)
142{
143	struct clk_notifier_data *ndata = data;
144	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
145	struct clk_mux *frac_mux = &frac->mux;
146	int ret = 0;
147
148	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
149		 __func__, event, ndata->old_rate, ndata->new_rate);
150	if (event == PRE_RATE_CHANGE) {
151		frac->rate_change_idx =
152				frac->mux_ops->get_parent(&frac_mux->hw);
153		if (frac->rate_change_idx != frac->mux_frac_idx) {
154			frac->mux_ops->set_parent(&frac_mux->hw,
155						  frac->mux_frac_idx);
156			frac->rate_change_remuxed = 1;
157		}
158	} else if (event == POST_RATE_CHANGE) {
159		/*
160		 * The POST_RATE_CHANGE notifier runs directly after the
161		 * divider clock is set in clk_change_rate, so we'll have
162		 * remuxed back to the original parent before clk_change_rate
163		 * reaches the mux itself.
164		 */
165		if (frac->rate_change_remuxed) {
166			frac->mux_ops->set_parent(&frac_mux->hw,
167						  frac->rate_change_idx);
168			frac->rate_change_remuxed = 0;
169		}
170	}
171
172	return notifier_from_errno(ret);
173}
174
/*
 * The fractional divider must have a denominator at least 20 times larger
 * than the numerator to generate a precise clock frequency; when the
 * requested rate is too close to the parent rate (and does not divide it
 * evenly), re-parent the computation onto the grandparent's rate instead.
 */
static void rockchip_fractional_approximation(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate,
		unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long p_rate, p_parent_rate;
	struct clk_hw *p_parent;
	unsigned long scale;

	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
		p_parent_rate = clk_hw_get_rate(p_parent);
		*parent_rate = p_parent_rate;
	}

	/*
	 * Get rate closer to *parent_rate to guarantee there is no overflow
	 * for m and n. In the result it will be the nearest rate left shifted
	 * by (scale - fd->nwidth) bits.
	 *
	 * NOTE(review): if rate > *parent_rate the division below yields 0
	 * and the "- 1" wraps to ULONG_MAX before fls_long() -- confirm
	 * callers never request a rate above the (grand)parent rate.
	 */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			m, n);
}
208
209static struct clk *rockchip_clk_register_frac_branch(
210		struct rockchip_clk_provider *ctx, const char *name,
211		const char *const *parent_names, u8 num_parents,
212		void __iomem *base, int muxdiv_offset, u8 div_flags,
213		int gate_offset, u8 gate_shift, u8 gate_flags,
214		unsigned long flags, struct rockchip_clk_branch *child,
215		spinlock_t *lock)
216{
217	struct rockchip_clk_frac *frac;
218	struct clk *clk;
219	struct clk_gate *gate = NULL;
220	struct clk_fractional_divider *div = NULL;
221	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;
222
223	if (muxdiv_offset < 0)
224		return ERR_PTR(-EINVAL);
225
226	if (child && child->branch_type != branch_mux) {
227		pr_err("%s: fractional child clock for %s can only be a mux\n",
228		       __func__, name);
229		return ERR_PTR(-EINVAL);
230	}
231
232	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
233	if (!frac)
234		return ERR_PTR(-ENOMEM);
235
236	if (gate_offset >= 0) {
237		gate = &frac->gate;
238		gate->flags = gate_flags;
239		gate->reg = base + gate_offset;
240		gate->bit_idx = gate_shift;
241		gate->lock = lock;
242		gate_ops = &clk_gate_ops;
243	}
244
245	div = &frac->div;
246	div->flags = div_flags;
247	div->reg = base + muxdiv_offset;
248	div->mshift = 16;
249	div->mwidth = 16;
250	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
251	div->nshift = 0;
252	div->nwidth = 16;
253	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
254	div->lock = lock;
255	div->approximation = rockchip_fractional_approximation;
256	div_ops = &clk_fractional_divider_ops;
257
258	clk = clk_register_composite(NULL, name, parent_names, num_parents,
259				     NULL, NULL,
260				     &div->hw, div_ops,
261				     gate ? &gate->hw : NULL, gate_ops,
262				     flags | CLK_SET_RATE_UNGATE);
263	if (IS_ERR(clk)) {
264		kfree(frac);
265		return clk;
266	}
267
268	if (child) {
269		struct clk_mux *frac_mux = &frac->mux;
270		struct clk_init_data init;
271		struct clk *mux_clk;
272		int ret;
273
274		frac->mux_frac_idx = match_string(child->parent_names,
275						  child->num_parents, name);
276		frac->mux_ops = &clk_mux_ops;
277		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;
278
279		frac_mux->reg = base + child->muxdiv_offset;
280		frac_mux->shift = child->mux_shift;
281		frac_mux->mask = BIT(child->mux_width) - 1;
282		frac_mux->flags = child->mux_flags;
283		frac_mux->lock = lock;
284		frac_mux->hw.init = &init;
285
286		init.name = child->name;
287		init.flags = child->flags | CLK_SET_RATE_PARENT;
288		init.ops = frac->mux_ops;
289		init.parent_names = child->parent_names;
290		init.num_parents = child->num_parents;
291
292		mux_clk = clk_register(NULL, &frac_mux->hw);
293		if (IS_ERR(mux_clk)) {
294			kfree(frac);
295			return clk;
296		}
297
298		rockchip_clk_add_lookup(ctx, mux_clk, child->id);
299
300		/* notifier on the fraction divider to catch rate changes */
301		if (frac->mux_frac_idx >= 0) {
302			pr_debug("%s: found fractional parent in mux at pos %d\n",
303				 __func__, frac->mux_frac_idx);
304			ret = clk_notifier_register(clk, &frac->clk_nb);
305			if (ret)
306				pr_err("%s: failed to register clock notifier for %s\n",
307						__func__, name);
308		} else {
309			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
310				__func__, name, child->name);
311		}
312	}
313
314	return clk;
315}
316
317static struct clk *rockchip_clk_register_factor_branch(const char *name,
318		const char *const *parent_names, u8 num_parents,
319		void __iomem *base, unsigned int mult, unsigned int div,
320		int gate_offset, u8 gate_shift, u8 gate_flags,
321		unsigned long flags, spinlock_t *lock)
322{
323	struct clk *clk;
324	struct clk_gate *gate = NULL;
325	struct clk_fixed_factor *fix = NULL;
326
327	/* without gate, register a simple factor clock */
328	if (gate_offset == 0) {
329		return clk_register_fixed_factor(NULL, name,
330				parent_names[0], flags, mult,
331				div);
332	}
333
334	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
335	if (!gate)
336		return ERR_PTR(-ENOMEM);
337
338	gate->flags = gate_flags;
339	gate->reg = base + gate_offset;
340	gate->bit_idx = gate_shift;
341	gate->lock = lock;
342
343	fix = kzalloc(sizeof(*fix), GFP_KERNEL);
344	if (!fix) {
345		kfree(gate);
346		return ERR_PTR(-ENOMEM);
347	}
348
349	fix->mult = mult;
350	fix->div = div;
351
352	clk = clk_register_composite(NULL, name, parent_names, num_parents,
353				     NULL, NULL,
354				     &fix->hw, &clk_fixed_factor_ops,
355				     &gate->hw, &clk_gate_ops, flags);
356	if (IS_ERR(clk)) {
357		kfree(fix);
358		kfree(gate);
359	}
360
361	return clk;
362}
363
364struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
365			void __iomem *base, unsigned long nr_clks)
366{
367	struct rockchip_clk_provider *ctx;
368	struct clk **clk_table;
369	int i;
370
371	ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
372	if (!ctx)
373		return ERR_PTR(-ENOMEM);
374
375	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
376	if (!clk_table)
377		goto err_free;
378
379	for (i = 0; i < nr_clks; ++i)
380		clk_table[i] = ERR_PTR(-ENOENT);
381
382	ctx->reg_base = base;
383	ctx->clk_data.clks = clk_table;
384	ctx->clk_data.clk_num = nr_clks;
385	ctx->cru_node = np;
386	spin_lock_init(&ctx->lock);
387
388	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
389						   "rockchip,grf");
390
391	return ctx;
392
393err_free:
394	kfree(ctx);
395	return ERR_PTR(-ENOMEM);
396}
397
398void __init rockchip_clk_of_add_provider(struct device_node *np,
399				struct rockchip_clk_provider *ctx)
400{
401	if (of_clk_add_provider(np, of_clk_src_onecell_get,
402				&ctx->clk_data))
403		pr_err("%s: could not register clk provider\n", __func__);
404}
405
406void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
407			     struct clk *clk, unsigned int id)
408{
409	if (ctx->clk_data.clks && id)
410		ctx->clk_data.clks[id] = clk;
411}
412
413void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
414				struct rockchip_pll_clock *list,
415				unsigned int nr_pll, int grf_lock_offset)
416{
417	struct clk *clk;
418	int idx;
419
420	for (idx = 0; idx < nr_pll; idx++, list++) {
421		clk = rockchip_clk_register_pll(ctx, list->type, list->name,
422				list->parent_names, list->num_parents,
423				list->con_offset, grf_lock_offset,
424				list->lock_shift, list->mode_offset,
425				list->mode_shift, list->rate_table,
426				list->flags, list->pll_flags);
427		if (IS_ERR(clk)) {
428			pr_err("%s: failed to register clock %s\n", __func__,
429				list->name);
430			continue;
431		}
432
433		rockchip_clk_add_lookup(ctx, clk, list->id);
434	}
435}
436
437void __init rockchip_clk_register_branches(
438				      struct rockchip_clk_provider *ctx,
439				      struct rockchip_clk_branch *list,
440				      unsigned int nr_clk)
441{
442	struct clk *clk = NULL;
443	unsigned int idx;
444	unsigned long flags;
445
446	for (idx = 0; idx < nr_clk; idx++, list++) {
447		flags = list->flags;
448
449		/* catch simple muxes */
450		switch (list->branch_type) {
451		case branch_mux:
452			clk = clk_register_mux(NULL, list->name,
453				list->parent_names, list->num_parents,
454				flags, ctx->reg_base + list->muxdiv_offset,
455				list->mux_shift, list->mux_width,
456				list->mux_flags, &ctx->lock);
457			break;
458		case branch_muxgrf:
459			clk = rockchip_clk_register_muxgrf(list->name,
460				list->parent_names, list->num_parents,
461				flags, ctx->grf, list->muxdiv_offset,
462				list->mux_shift, list->mux_width,
463				list->mux_flags);
464			break;
465		case branch_divider:
466			if (list->div_table)
467				clk = clk_register_divider_table(NULL,
468					list->name, list->parent_names[0],
469					flags,
470					ctx->reg_base + list->muxdiv_offset,
471					list->div_shift, list->div_width,
472					list->div_flags, list->div_table,
473					&ctx->lock);
474			else
475				clk = clk_register_divider(NULL, list->name,
476					list->parent_names[0], flags,
477					ctx->reg_base + list->muxdiv_offset,
478					list->div_shift, list->div_width,
479					list->div_flags, &ctx->lock);
480			break;
481		case branch_fraction_divider:
482			clk = rockchip_clk_register_frac_branch(ctx, list->name,
483				list->parent_names, list->num_parents,
484				ctx->reg_base, list->muxdiv_offset,
485				list->div_flags,
486				list->gate_offset, list->gate_shift,
487				list->gate_flags, flags, list->child,
488				&ctx->lock);
489			break;
490		case branch_half_divider:
491			clk = rockchip_clk_register_halfdiv(list->name,
492				list->parent_names, list->num_parents,
493				ctx->reg_base, list->muxdiv_offset,
494				list->mux_shift, list->mux_width,
495				list->mux_flags, list->div_shift,
496				list->div_width, list->div_flags,
497				list->gate_offset, list->gate_shift,
498				list->gate_flags, flags, &ctx->lock);
499			break;
500		case branch_gate:
501			flags |= CLK_SET_RATE_PARENT;
502
503			clk = clk_register_gate(NULL, list->name,
504				list->parent_names[0], flags,
505				ctx->reg_base + list->gate_offset,
506				list->gate_shift, list->gate_flags, &ctx->lock);
507			break;
508		case branch_composite:
509			clk = rockchip_clk_register_branch(list->name,
510				list->parent_names, list->num_parents,
511				ctx->reg_base, list->muxdiv_offset,
512				list->mux_shift,
513				list->mux_width, list->mux_flags,
514				list->div_offset, list->div_shift, list->div_width,
515				list->div_flags, list->div_table,
516				list->gate_offset, list->gate_shift,
517				list->gate_flags, flags, &ctx->lock);
518			break;
519		case branch_mmc:
520			clk = rockchip_clk_register_mmc(
521				list->name,
522				list->parent_names, list->num_parents,
523				ctx->reg_base + list->muxdiv_offset,
524				list->div_shift
525			);
526			break;
527		case branch_inverter:
528			clk = rockchip_clk_register_inverter(
529				list->name, list->parent_names,
530				list->num_parents,
531				ctx->reg_base + list->muxdiv_offset,
532				list->div_shift, list->div_flags, &ctx->lock);
533			break;
534		case branch_factor:
535			clk = rockchip_clk_register_factor_branch(
536				list->name, list->parent_names,
537				list->num_parents, ctx->reg_base,
538				list->div_shift, list->div_width,
539				list->gate_offset, list->gate_shift,
540				list->gate_flags, flags, &ctx->lock);
541			break;
542		case branch_ddrclk:
543			clk = rockchip_clk_register_ddrclk(
544				list->name, list->flags,
545				list->parent_names, list->num_parents,
546				list->muxdiv_offset, list->mux_shift,
547				list->mux_width, list->div_shift,
548				list->div_width, list->div_flags,
549				ctx->reg_base, &ctx->lock);
550			break;
551		}
552
553		/* none of the cases above matched */
554		if (!clk) {
555			pr_err("%s: unknown clock type %d\n",
556			       __func__, list->branch_type);
557			continue;
558		}
559
560		if (IS_ERR(clk)) {
561			pr_err("%s: failed to register clock %s: %ld\n",
562			       __func__, list->name, PTR_ERR(clk));
563			continue;
564		}
565
566		rockchip_clk_add_lookup(ctx, clk, list->id);
567	}
568}
569
570void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
571			unsigned int lookup_id,
572			const char *name, const char *const *parent_names,
573			u8 num_parents,
574			const struct rockchip_cpuclk_reg_data *reg_data,
575			const struct rockchip_cpuclk_rate_table *rates,
576			int nrates)
577{
578	struct clk *clk;
579
580	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
581					   reg_data, rates, nrates,
582					   ctx->reg_base, &ctx->lock);
583	if (IS_ERR(clk)) {
584		pr_err("%s: failed to register clock %s: %ld\n",
585		       __func__, name, PTR_ERR(clk));
586		return;
587	}
588
589	rockchip_clk_add_lookup(ctx, clk, lookup_id);
590}
591
592void __init rockchip_clk_protect_critical(const char *const clocks[],
593					  int nclocks)
594{
595	int i;
596
597	/* Protect the clocks that needs to stay on */
598	for (i = 0; i < nclocks; i++) {
599		struct clk *clk = __clk_lookup(clocks[i]);
600
601		if (clk)
602			clk_prepare_enable(clk);
603	}
604}
605
/* state filled in by rockchip_register_restart_notifier() below */
static void __iomem *rst_base;		/* CRU register base */
static unsigned int reg_restart;	/* offset of the reset register */
static void (*cb_restart)(void);	/* optional pre-reset hook */
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	/*
	 * 0xfdb9 is presumably the SoC's global-soft-reset magic value --
	 * TODO(review): confirm against the per-SoC TRM.
	 */
	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,	/* above the default (0) fallback handlers */
};
623
624void __init
625rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
626					       unsigned int reg,
627					       void (*cb)(void))
628{
629	int ret;
630
631	rst_base = ctx->reg_base;
632	reg_restart = reg;
633	cb_restart = cb;
634	ret = register_restart_handler(&rockchip_restart_handler);
635	if (ret)
636		pr_err("%s: cannot register restart handler, %d\n",
637		       __func__, ret);
638}