v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * OMAP clkctrl clock support
  4 *
  5 * Copyright (C) 2017 Texas Instruments, Inc.
  6 *
  7 * Tero Kristo <t-kristo@ti.com>
  8 */
  9
 10#include <linux/clk-provider.h>
 11#include <linux/slab.h>
 12#include <linux/of.h>
 13#include <linux/of_address.h>
 14#include <linux/clk/ti.h>
 15#include <linux/delay.h>
 16#include <linux/string_helpers.h>
 17#include <linux/timekeeping.h>
 18#include "clock.h"
 19
 20#define NO_IDLEST			0
 21
 22#define OMAP4_MODULEMODE_MASK		0x3
 23
 24#define MODULEMODE_HWCTRL		0x1
 25#define MODULEMODE_SWCTRL		0x2
 26
 27#define OMAP4_IDLEST_MASK		(0x3 << 16)
 28#define OMAP4_IDLEST_SHIFT		16
 29
 30#define OMAP4_STBYST_MASK		BIT(18)
 31#define OMAP4_STBYST_SHIFT		18
 32
 33#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
 34#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
 35#define CLKCTRL_IDLEST_DISABLED		0x3
 36
 37/* These timeouts are in us */
 38#define OMAP4_MAX_MODULE_READY_TIME	2000
 39#define OMAP4_MAX_MODULE_DISABLE_TIME	5000
 40
 41static bool _early_timeout = true;
 42
 43struct omap_clkctrl_provider {
 44	void __iomem *base;
 45	struct list_head clocks;
 46	char *clkdm_name;
 47};
 48
 49struct omap_clkctrl_clk {
 50	struct clk_hw *clk;
 51	u16 reg_offset;
 52	int bit_offset;
 53	struct list_head node;
 54};
 55
 56union omap4_timeout {
 57	u32 cycles;
 58	ktime_t start;
 59};
 60
 61static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
 62	{ 0 },
 63};
 64
 65static u32 _omap4_idlest(u32 val)
 66{
 67	val &= OMAP4_IDLEST_MASK;
 68	val >>= OMAP4_IDLEST_SHIFT;
 69
 70	return val;
 71}
 72
 73static bool _omap4_is_idle(u32 val)
 74{
 75	val = _omap4_idlest(val);
 76
 77	return val == CLKCTRL_IDLEST_DISABLED;
 78}
 79
 80static bool _omap4_is_ready(u32 val)
 81{
 82	val = _omap4_idlest(val);
 83
 84	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
 85	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
 86}
 87
 88static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
 89{
 90	/*
 91	 * There are two special cases where ktime_to_ns() can't be
 92	 * used to track the timeouts. First one is during early boot
 93	 * when the timers haven't been initialized yet. The second
 94	 * one is during suspend-resume cycle while timekeeping is
 95	 * being suspended / resumed. Clocksource for the system
 96	 * can be from a timer that requires pm_runtime access, which
 97	 * will eventually bring us here with timekeeping_suspended,
 98	 * during both suspend entry and resume paths. This happens
 99	 * at least on am43xx platform. Account for flakiness
100	 * with udelay() by multiplying the timeout value by 2.
101	 */
102	if (unlikely(_early_timeout || timekeeping_suspended)) {
103		if (time->cycles++ < timeout) {
104			udelay(1 * 2);
105			return false;
106		}
107	} else {
108		if (!ktime_to_ns(time->start)) {
109			time->start = ktime_get();
110			return false;
111		}
112
113		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
114			cpu_relax();
115			return false;
116		}
117	}
118
119	return true;
120}
121
122static int __init _omap4_disable_early_timeout(void)
123{
124	_early_timeout = false;
125
126	return 0;
127}
128arch_initcall(_omap4_disable_early_timeout);
129
130static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
131{
132	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
133	u32 val;
134	int ret;
135	union omap4_timeout timeout = { 0 };
136
137	if (clk->clkdm) {
138		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
139		if (ret) {
140			WARN(1,
141			     "%s: could not enable %s's clockdomain %s: %d\n",
142			     __func__, clk_hw_get_name(hw),
143			     clk->clkdm_name, ret);
144			return ret;
145		}
146	}
147
148	if (!clk->enable_bit)
149		return 0;
150
151	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
152
153	val &= ~OMAP4_MODULEMODE_MASK;
154	val |= clk->enable_bit;
155
156	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
157
158	if (test_bit(NO_IDLEST, &clk->flags))
159		return 0;
160
161	/* Wait until module is enabled */
162	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
163		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
164			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
165			return -EBUSY;
166		}
167	}
168
169	return 0;
170}
171
172static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
173{
174	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
175	u32 val;
176	union omap4_timeout timeout = { 0 };
177
178	if (!clk->enable_bit)
179		goto exit;
180
181	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
182
183	val &= ~OMAP4_MODULEMODE_MASK;
184
185	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
186
187	if (test_bit(NO_IDLEST, &clk->flags))
188		goto exit;
189
190	/* Wait until module is disabled */
191	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
192		if (_omap4_is_timeout(&timeout,
193				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
194			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
195			break;
196		}
197	}
198
199exit:
200	if (clk->clkdm)
201		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
202}
203
204static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
205{
206	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
207	u32 val;
208
209	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
210
211	if (val & clk->enable_bit)
212		return 1;
213
214	return 0;
215}
216
217static const struct clk_ops omap4_clkctrl_clk_ops = {
218	.enable		= _omap4_clkctrl_clk_enable,
219	.disable	= _omap4_clkctrl_clk_disable,
220	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
221	.init		= omap2_init_clk_clkdm,
222};
223
224static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
225					      void *data)
226{
227	struct omap_clkctrl_provider *provider = data;
228	struct omap_clkctrl_clk *entry = NULL, *iter;
229
230	if (clkspec->args_count != 2)
231		return ERR_PTR(-EINVAL);
232
233	pr_debug("%s: looking for %x:%x\n", __func__,
234		 clkspec->args[0], clkspec->args[1]);
235
236	list_for_each_entry(iter, &provider->clocks, node) {
237		if (iter->reg_offset == clkspec->args[0] &&
238		    iter->bit_offset == clkspec->args[1]) {
239			entry = iter;
240			break;
241		}
242	}
243
244	if (!entry)
245		return ERR_PTR(-EINVAL);
246
247	return entry->clk;
248}
249
250/* Get clkctrl clock base name based on clkctrl_name or dts node */
251static const char * __init clkctrl_get_clock_name(struct device_node *np,
252						  const char *clkctrl_name,
253						  int offset, int index,
254						  bool legacy_naming)
255{
256	char *clock_name;
257
258	/* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
259	if (clkctrl_name && !legacy_naming) {
260		clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
261				       clkctrl_name, offset, index);
262		if (!clock_name)
263			return NULL;
264
265		strreplace(clock_name, '_', '-');
266
267		return clock_name;
268	}
269
270	/* l4per:1234:0 old style naming based on clkctrl_name */
271	if (clkctrl_name)
272		return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
273				 clkctrl_name, offset, index);
274
275	/* l4per_cm:1234:0 old style naming based on parent node name */
276	if (legacy_naming)
277		return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
278				 np->parent, offset, index);
279
280	/* l4per-clkctrl:1234:0 style naming based on node name */
281	return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
282}
283
284static int __init
285_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
286			 struct device_node *node, struct clk_hw *clk_hw,
287			 u16 offset, u8 bit, const char * const *parents,
288			 int num_parents, const struct clk_ops *ops,
289			 const char *clkctrl_name)
290{
291	struct clk_init_data init = { NULL };
292	struct clk *clk;
293	struct omap_clkctrl_clk *clkctrl_clk;
294	int ret = 0;
295
296	init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
297					   ti_clk_get_features()->flags &
298					   TI_CLK_CLKCTRL_COMPAT);
299
300	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
301	if (!init.name || !clkctrl_clk) {
302		ret = -ENOMEM;
303		goto cleanup;
304	}
305
306	clk_hw->init = &init;
307	init.parent_names = parents;
308	init.num_parents = num_parents;
309	init.ops = ops;
310	init.flags = 0;
311
312	clk = of_ti_clk_register(node, clk_hw, init.name);
313	if (IS_ERR_OR_NULL(clk)) {
314		ret = -EINVAL;
315		goto cleanup;
316	}
317
318	clkctrl_clk->reg_offset = offset;
319	clkctrl_clk->bit_offset = bit;
320	clkctrl_clk->clk = clk_hw;
321
322	list_add(&clkctrl_clk->node, &provider->clocks);
323
324	return 0;
325
326cleanup:
327	kfree(init.name);
328	kfree(clkctrl_clk);
329	return ret;
330}
331
332static void __init
333_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
334		       struct device_node *node, u16 offset,
335		       const struct omap_clkctrl_bit_data *data,
336		       void __iomem *reg, const char *clkctrl_name)
337{
338	struct clk_hw_omap *clk_hw;
339
340	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
341	if (!clk_hw)
342		return;
343
344	clk_hw->enable_bit = data->bit;
345	clk_hw->enable_reg.ptr = reg;
346
347	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
348				     data->bit, data->parents, 1,
349				     &omap_gate_clk_ops, clkctrl_name))
350		kfree(clk_hw);
351}
352
353static void __init
354_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
355		      struct device_node *node, u16 offset,
356		      const struct omap_clkctrl_bit_data *data,
357		      void __iomem *reg, const char *clkctrl_name)
358{
359	struct clk_omap_mux *mux;
360	int num_parents = 0;
361	const char * const *pname;
362
363	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
364	if (!mux)
365		return;
366
367	pname = data->parents;
368	while (*pname) {
369		num_parents++;
370		pname++;
371	}
372
373	mux->mask = num_parents;
374	if (!(mux->flags & CLK_MUX_INDEX_ONE))
375		mux->mask--;
376
377	mux->mask = (1 << fls(mux->mask)) - 1;
378
379	mux->shift = data->bit;
380	mux->reg.ptr = reg;
381
382	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
383				     data->bit, data->parents, num_parents,
384				     &ti_clk_mux_ops, clkctrl_name))
385		kfree(mux);
386}
387
388static void __init
389_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
390		      struct device_node *node, u16 offset,
391		      const struct omap_clkctrl_bit_data *data,
392		      void __iomem *reg, const char *clkctrl_name)
393{
394	struct clk_omap_divider *div;
395	const struct omap_clkctrl_div_data *div_data = data->data;
396	u8 div_flags = 0;
397
398	div = kzalloc(sizeof(*div), GFP_KERNEL);
399	if (!div)
400		return;
401
402	div->reg.ptr = reg;
403	div->shift = data->bit;
404	div->flags = div_data->flags;
405
406	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
407		div_flags |= CLKF_INDEX_POWER_OF_TWO;
408
409	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
410				      div_data->max_div, div_flags,
411				      div)) {
412		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
413		       node, offset, data->bit);
414		kfree(div);
415		return;
416	}
417
418	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
419				     data->bit, data->parents, 1,
420				     &ti_clk_divider_ops, clkctrl_name))
421		kfree(div);
422}
423
424static void __init
425_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
426			  struct device_node *node,
427			  const struct omap_clkctrl_reg_data *data,
428			  void __iomem *reg, const char *clkctrl_name)
429{
430	const struct omap_clkctrl_bit_data *bits = data->bit_data;
431
432	if (!bits)
433		return;
434
435	while (bits->bit) {
436		switch (bits->type) {
437		case TI_CLK_GATE:
438			_ti_clkctrl_setup_gate(provider, node, data->offset,
439					       bits, reg, clkctrl_name);
440			break;
441
442		case TI_CLK_DIVIDER:
443			_ti_clkctrl_setup_div(provider, node, data->offset,
444					      bits, reg, clkctrl_name);
445			break;
446
447		case TI_CLK_MUX:
448			_ti_clkctrl_setup_mux(provider, node, data->offset,
449					      bits, reg, clkctrl_name);
450			break;
451
452		default:
453			pr_err("%s: bad subclk type: %d\n", __func__,
454			       bits->type);
455			return;
456		}
457		bits++;
458	}
459}
460
461static void __init _clkctrl_add_provider(void *data,
462					 struct device_node *np)
463{
464	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
465}
466
467/*
468 * Get clock name based on "clock-output-names" property or the
469 * compatible property for clkctrl.
470 */
471static const char * __init clkctrl_get_name(struct device_node *np)
472{
473	struct property *prop;
474	const int prefix_len = 11;
475	const char *compat;
476	const char *output;
477	const char *end;
478	char *name;
479
480	if (!of_property_read_string_index(np, "clock-output-names", 0,
481					   &output)) {
482		int len;
483
484		len = strlen(output);
485		end = strstr(output, "_clkctrl");
486		if (end)
487			len -= strlen(end);
488		name = kstrndup(output, len, GFP_KERNEL);
489
490		return name;
491	}
492
493	of_property_for_each_string(np, "compatible", prop, compat) {
494		if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
495			end = compat + prefix_len;
496			/* Two letter minimum name length for l3, l4 etc */
497			if (strnlen(end, 16) < 2)
498				continue;
499			name = kstrdup_and_replace(end, '-', '_', GFP_KERNEL);
500			if (!name)
501				continue;
502
503			return name;
504		}
505	}
506
507	return NULL;
508}
509
510static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
511{
512	struct omap_clkctrl_provider *provider;
513	const struct omap_clkctrl_data *data = default_clkctrl_data;
514	const struct omap_clkctrl_reg_data *reg_data;
515	struct clk_init_data init = { NULL };
516	struct clk_hw_omap *hw;
517	struct clk *clk;
518	struct omap_clkctrl_clk *clkctrl_clk = NULL;
519	bool legacy_naming;
520	const char *clkctrl_name;
521	u32 addr;
522	int ret;
523	char *c;
524	u16 soc_mask = 0;
525	struct resource res;
526
527	of_address_to_resource(node, 0, &res);
528	addr = (u32)res.start;
529
530#ifdef CONFIG_ARCH_OMAP4
531	if (of_machine_is_compatible("ti,omap4"))
532		data = omap4_clkctrl_data;
533#endif
534#ifdef CONFIG_SOC_OMAP5
535	if (of_machine_is_compatible("ti,omap5"))
536		data = omap5_clkctrl_data;
537#endif
538#ifdef CONFIG_SOC_DRA7XX
539	if (of_machine_is_compatible("ti,dra7"))
540		data = dra7_clkctrl_data;
541	if (of_machine_is_compatible("ti,dra72"))
542		soc_mask = CLKF_SOC_DRA72;
543	if (of_machine_is_compatible("ti,dra74"))
544		soc_mask = CLKF_SOC_DRA74;
545	if (of_machine_is_compatible("ti,dra76"))
546		soc_mask = CLKF_SOC_DRA76;
547#endif
548#ifdef CONFIG_SOC_AM33XX
549	if (of_machine_is_compatible("ti,am33xx"))
550		data = am3_clkctrl_data;
551#endif
552#ifdef CONFIG_SOC_AM43XX
553	if (of_machine_is_compatible("ti,am4372"))
554		data = am4_clkctrl_data;
555
556	if (of_machine_is_compatible("ti,am438x"))
557		data = am438x_clkctrl_data;
558#endif
559#ifdef CONFIG_SOC_TI81XX
560	if (of_machine_is_compatible("ti,dm814"))
561		data = dm814_clkctrl_data;
562
563	if (of_machine_is_compatible("ti,dm816"))
564		data = dm816_clkctrl_data;
565#endif
566
567	if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
568		soc_mask |= CLKF_SOC_NONSEC;
569
570	while (data->addr) {
571		if (addr == data->addr)
572			break;
573
574		data++;
575	}
576
577	if (!data->addr) {
578		pr_err("%pOF not found from clkctrl data.\n", node);
579		return;
580	}
581
582	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
583	if (!provider)
584		return;
585
586	provider->base = of_iomap(node, 0);
587
588	legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
589	clkctrl_name = clkctrl_get_name(node);
590	if (clkctrl_name) {
591		provider->clkdm_name = kasprintf(GFP_KERNEL,
592						 "%s_clkdm", clkctrl_name);
593		if (!provider->clkdm_name) {
594			kfree(provider);
595			return;
596		}
597		goto clkdm_found;
598	}
599
600	/*
601	 * The code below can be removed when all clkctrl nodes use domain
602	 * specific compatible property and standard clock node naming
603	 */
604	if (legacy_naming) {
605		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
606		if (!provider->clkdm_name) {
607			kfree(provider);
608			return;
609		}
610
611		/*
612		 * Create default clkdm name, replace _cm from end of parent
613		 * node name with _clkdm
614		 */
615		provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
616	} else {
617		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
618		if (!provider->clkdm_name) {
619			kfree(provider);
620			return;
621		}
622
623		/*
624		 * Create default clkdm name, replace _clkctrl from end of
625		 * node name with _clkdm
626		 */
627		provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
628	}
629
630	strcat(provider->clkdm_name, "clkdm");
631
632	/* Replace any dash from the clkdm name with underscore */
633	c = provider->clkdm_name;
634
635	while (*c) {
636		if (*c == '-')
637			*c = '_';
638		c++;
639	}
640clkdm_found:
641	INIT_LIST_HEAD(&provider->clocks);
642
643	/* Generate clocks */
644	reg_data = data->regs;
645
646	while (reg_data->parent) {
647		if ((reg_data->flags & CLKF_SOC_MASK) &&
648		    (reg_data->flags & soc_mask) == 0) {
649			reg_data++;
650			continue;
651		}
652
653		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
654		if (!hw)
655			return;
656
657		hw->enable_reg.ptr = provider->base + reg_data->offset;
658
659		_ti_clkctrl_setup_subclks(provider, node, reg_data,
660					  hw->enable_reg.ptr, clkctrl_name);
661
662		if (reg_data->flags & CLKF_SW_SUP)
663			hw->enable_bit = MODULEMODE_SWCTRL;
664		if (reg_data->flags & CLKF_HW_SUP)
665			hw->enable_bit = MODULEMODE_HWCTRL;
666		if (reg_data->flags & CLKF_NO_IDLEST)
667			set_bit(NO_IDLEST, &hw->flags);
668
669		if (reg_data->clkdm_name)
670			hw->clkdm_name = reg_data->clkdm_name;
671		else
672			hw->clkdm_name = provider->clkdm_name;
673
674		init.parent_names = &reg_data->parent;
675		init.num_parents = 1;
676		init.flags = 0;
677		if (reg_data->flags & CLKF_SET_RATE_PARENT)
678			init.flags |= CLK_SET_RATE_PARENT;
679
680		init.name = clkctrl_get_clock_name(node, clkctrl_name,
681						   reg_data->offset, 0,
682						   legacy_naming);
683		if (!init.name)
684			goto cleanup;
685
686		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
687		if (!clkctrl_clk)
688			goto cleanup;
689
690		init.ops = &omap4_clkctrl_clk_ops;
691		hw->hw.init = &init;
692
693		clk = of_ti_clk_register_omap_hw(node, &hw->hw, init.name);
694		if (IS_ERR_OR_NULL(clk))
695			goto cleanup;
696
697		clkctrl_clk->reg_offset = reg_data->offset;
698		clkctrl_clk->clk = &hw->hw;
699
700		list_add(&clkctrl_clk->node, &provider->clocks);
701
702		reg_data++;
703	}
704
705	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
706	if (ret == -EPROBE_DEFER)
707		ti_clk_retry_init(node, provider, _clkctrl_add_provider);
708
709	kfree(clkctrl_name);
710
711	return;
712
713cleanup:
714	kfree(hw);
715	kfree(init.name);
716	kfree(clkctrl_name);
717	kfree(clkctrl_clk);
718}
719CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
720	       _ti_omap4_clkctrl_setup);
721
722/**
723 * ti_clk_is_in_standby - Check if clkctrl clock is in standby or not
724 * @clk: clock to check standby status for
725 *
726 * Finds whether the provided clock is in standby mode or not. Returns
727 * true if the provided clock is a clkctrl type clock and it is in standby,
728 * false otherwise.
729 */
730bool ti_clk_is_in_standby(struct clk *clk)
731{
732	struct clk_hw *hw;
733	struct clk_hw_omap *hwclk;
734	u32 val;
735
736	hw = __clk_get_hw(clk);
737
738	if (!omap2_clk_is_hw_omap(hw))
739		return false;
740
741	hwclk = to_clk_hw_omap(hw);
742
743	val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);
744
745	if (val & OMAP4_STBYST_MASK)
746		return true;
747
748	return false;
749}
750EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);
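
The exported helper above only reports the STBYST status bit; it does not force a module into standby. A minimal, hypothetical consumer sketch follows (not part of the kernel file above): the device pointer, the "fck" clock name and the polling budget are assumptions for illustration, and the check is only meaningful for a clkctrl-backed clock.

#include <linux/clk.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical example: gate a module clock, then poll for standby. */
static int example_gate_and_wait_standby(struct device *dev)
{
	struct clk *fck;
	int i;

	fck = devm_clk_get(dev, "fck");		/* "fck" is an assumed clock name */
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	clk_disable_unprepare(fck);		/* assumes a prior clk_prepare_enable() */

	/* Poll ti_clk_is_in_standby() with an arbitrary ~1 ms budget */
	for (i = 0; i < 100; i++) {
		if (ti_clk_is_in_standby(fck))
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}
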
v5.9
  1/*
  2 * OMAP clkctrl clock support
  3 *
  4 * Copyright (C) 2017 Texas Instruments, Inc.
  5 *
  6 * Tero Kristo <t-kristo@ti.com>
  7 *
  8 * This program is free software; you can redistribute it and/or modify
  9 * it under the terms of the GNU General Public License version 2 as
 10 * published by the Free Software Foundation.
 11 *
 12 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 13 * kind, whether express or implied; without even the implied warranty
 14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 15 * GNU General Public License for more details.
 16 */
 17
 18#include <linux/clk-provider.h>
 19#include <linux/slab.h>
 20#include <linux/of.h>
 21#include <linux/of_address.h>
 22#include <linux/clk/ti.h>
 23#include <linux/delay.h>
 24#include <linux/timekeeping.h>
 25#include "clock.h"
 26
 27#define NO_IDLEST			0
 28
 29#define OMAP4_MODULEMODE_MASK		0x3
 30
 31#define MODULEMODE_HWCTRL		0x1
 32#define MODULEMODE_SWCTRL		0x2
 33
 34#define OMAP4_IDLEST_MASK		(0x3 << 16)
 35#define OMAP4_IDLEST_SHIFT		16
 36
 37#define OMAP4_STBYST_MASK		BIT(18)
 38#define OMAP4_STBYST_SHIFT		18
 39
 40#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
 41#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
 42#define CLKCTRL_IDLEST_DISABLED		0x3
 43
 44/* These timeouts are in us */
 45#define OMAP4_MAX_MODULE_READY_TIME	2000
 46#define OMAP4_MAX_MODULE_DISABLE_TIME	5000
 47
 48static bool _early_timeout = true;
 49
 50struct omap_clkctrl_provider {
 51	void __iomem *base;
 52	struct list_head clocks;
 53	char *clkdm_name;
 54};
 55
 56struct omap_clkctrl_clk {
 57	struct clk_hw *clk;
 58	u16 reg_offset;
 59	int bit_offset;
 60	struct list_head node;
 61};
 62
 63union omap4_timeout {
 64	u32 cycles;
 65	ktime_t start;
 66};
 67
 68static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
 69	{ 0 },
 70};
 71
 72static u32 _omap4_idlest(u32 val)
 73{
 74	val &= OMAP4_IDLEST_MASK;
 75	val >>= OMAP4_IDLEST_SHIFT;
 76
 77	return val;
 78}
 79
 80static bool _omap4_is_idle(u32 val)
 81{
 82	val = _omap4_idlest(val);
 83
 84	return val == CLKCTRL_IDLEST_DISABLED;
 85}
 86
 87static bool _omap4_is_ready(u32 val)
 88{
 89	val = _omap4_idlest(val);
 90
 91	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
 92	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
 93}
 94
 95static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
 96{
 97	/*
 98	 * There are two special cases where ktime_to_ns() can't be
 99	 * used to track the timeouts. First one is during early boot
100	 * when the timers haven't been initialized yet. The second
101	 * one is during suspend-resume cycle while timekeeping is
102	 * being suspended / resumed. Clocksource for the system
103	 * can be from a timer that requires pm_runtime access, which
104	 * will eventually bring us here with timekeeping_suspended,
105	 * during both suspend entry and resume paths. This happens
106	 * at least on am43xx platform. Account for flakiness
107	 * with udelay() by multiplying the timeout value by 2.
108	 */
109	if (unlikely(_early_timeout || timekeeping_suspended)) {
110		if (time->cycles++ < timeout) {
111			udelay(1 * 2);
112			return false;
113		}
114	} else {
115		if (!ktime_to_ns(time->start)) {
116			time->start = ktime_get();
117			return false;
118		}
119
120		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
121			cpu_relax();
122			return false;
123		}
124	}
125
126	return true;
127}
128
129static int __init _omap4_disable_early_timeout(void)
130{
131	_early_timeout = false;
132
133	return 0;
134}
135arch_initcall(_omap4_disable_early_timeout);
136
137static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
138{
139	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
140	u32 val;
141	int ret;
142	union omap4_timeout timeout = { 0 };
143
144	if (clk->clkdm) {
145		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
146		if (ret) {
147			WARN(1,
148			     "%s: could not enable %s's clockdomain %s: %d\n",
149			     __func__, clk_hw_get_name(hw),
150			     clk->clkdm_name, ret);
151			return ret;
152		}
153	}
154
155	if (!clk->enable_bit)
156		return 0;
157
158	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
159
160	val &= ~OMAP4_MODULEMODE_MASK;
161	val |= clk->enable_bit;
162
163	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
164
165	if (test_bit(NO_IDLEST, &clk->flags))
166		return 0;
167
168	/* Wait until module is enabled */
169	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
170		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
171			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
172			return -EBUSY;
173		}
174	}
175
176	return 0;
177}
178
179static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
180{
181	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
182	u32 val;
183	union omap4_timeout timeout = { 0 };
184
185	if (!clk->enable_bit)
186		goto exit;
187
188	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
189
190	val &= ~OMAP4_MODULEMODE_MASK;
191
192	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
193
194	if (test_bit(NO_IDLEST, &clk->flags))
195		goto exit;
196
197	/* Wait until module is disabled */
198	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
199		if (_omap4_is_timeout(&timeout,
200				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
201			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
202			break;
203		}
204	}
205
206exit:
207	if (clk->clkdm)
208		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
209}
210
211static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
212{
213	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
214	u32 val;
215
216	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
217
218	if (val & clk->enable_bit)
219		return 1;
220
221	return 0;
222}
223
224static const struct clk_ops omap4_clkctrl_clk_ops = {
225	.enable		= _omap4_clkctrl_clk_enable,
226	.disable	= _omap4_clkctrl_clk_disable,
227	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
228	.init		= omap2_init_clk_clkdm,
229};
230
231static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
232					      void *data)
233{
234	struct omap_clkctrl_provider *provider = data;
235	struct omap_clkctrl_clk *entry;
236	bool found = false;
237
238	if (clkspec->args_count != 2)
239		return ERR_PTR(-EINVAL);
240
241	pr_debug("%s: looking for %x:%x\n", __func__,
242		 clkspec->args[0], clkspec->args[1]);
243
244	list_for_each_entry(entry, &provider->clocks, node) {
245		if (entry->reg_offset == clkspec->args[0] &&
246		    entry->bit_offset == clkspec->args[1]) {
247			found = true;
248			break;
249		}
250	}
251
252	if (!found)
253		return ERR_PTR(-EINVAL);
254
255	return entry->clk;
256}
257
258/* Get clkctrl clock base name based on clkctrl_name or dts node */
259static const char * __init clkctrl_get_clock_name(struct device_node *np,
260						  const char *clkctrl_name,
261						  int offset, int index,
262						  bool legacy_naming)
263{
264	char *clock_name;
265
266	/* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
267	if (clkctrl_name && !legacy_naming) {
268		clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
269				       clkctrl_name, offset, index);
270		strreplace(clock_name, '_', '-');
271
272		return clock_name;
273	}
274
275	/* l4per:1234:0 old style naming based on clkctrl_name */
276	if (clkctrl_name)
277		return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
278				 clkctrl_name, offset, index);
279
280	/* l4per_cm:1234:0 old style naming based on parent node name */
281	if (legacy_naming)
282		return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
283				 np->parent, offset, index);
284
285	/* l4per-clkctrl:1234:0 style naming based on node name */
286	return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
287}
288
289static int __init
290_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
291			 struct device_node *node, struct clk_hw *clk_hw,
292			 u16 offset, u8 bit, const char * const *parents,
293			 int num_parents, const struct clk_ops *ops,
294			 const char *clkctrl_name)
295{
296	struct clk_init_data init = { NULL };
297	struct clk *clk;
298	struct omap_clkctrl_clk *clkctrl_clk;
299	int ret = 0;
300
301	init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
302					   ti_clk_get_features()->flags &
303					   TI_CLK_CLKCTRL_COMPAT);
304
305	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
306	if (!init.name || !clkctrl_clk) {
307		ret = -ENOMEM;
308		goto cleanup;
309	}
310
311	clk_hw->init = &init;
312	init.parent_names = parents;
313	init.num_parents = num_parents;
314	init.ops = ops;
315	init.flags = 0;
316
317	clk = ti_clk_register(NULL, clk_hw, init.name);
318	if (IS_ERR_OR_NULL(clk)) {
319		ret = -EINVAL;
320		goto cleanup;
321	}
322
323	clkctrl_clk->reg_offset = offset;
324	clkctrl_clk->bit_offset = bit;
325	clkctrl_clk->clk = clk_hw;
326
327	list_add(&clkctrl_clk->node, &provider->clocks);
328
329	return 0;
330
331cleanup:
332	kfree(init.name);
333	kfree(clkctrl_clk);
334	return ret;
335}
336
337static void __init
338_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
339		       struct device_node *node, u16 offset,
340		       const struct omap_clkctrl_bit_data *data,
341		       void __iomem *reg, const char *clkctrl_name)
342{
343	struct clk_hw_omap *clk_hw;
344
345	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
346	if (!clk_hw)
347		return;
348
349	clk_hw->enable_bit = data->bit;
350	clk_hw->enable_reg.ptr = reg;
351
352	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
353				     data->bit, data->parents, 1,
354				     &omap_gate_clk_ops, clkctrl_name))
355		kfree(clk_hw);
356}
357
358static void __init
359_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
360		      struct device_node *node, u16 offset,
361		      const struct omap_clkctrl_bit_data *data,
362		      void __iomem *reg, const char *clkctrl_name)
363{
364	struct clk_omap_mux *mux;
365	int num_parents = 0;
366	const char * const *pname;
367
368	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
369	if (!mux)
370		return;
371
372	pname = data->parents;
373	while (*pname) {
374		num_parents++;
375		pname++;
376	}
377
378	mux->mask = num_parents;
379	if (!(mux->flags & CLK_MUX_INDEX_ONE))
380		mux->mask--;
381
382	mux->mask = (1 << fls(mux->mask)) - 1;
383
384	mux->shift = data->bit;
385	mux->reg.ptr = reg;
386
387	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
388				     data->bit, data->parents, num_parents,
389				     &ti_clk_mux_ops, clkctrl_name))
390		kfree(mux);
391}
392
393static void __init
394_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
395		      struct device_node *node, u16 offset,
396		      const struct omap_clkctrl_bit_data *data,
397		      void __iomem *reg, const char *clkctrl_name)
398{
399	struct clk_omap_divider *div;
400	const struct omap_clkctrl_div_data *div_data = data->data;
401	u8 div_flags = 0;
402
403	div = kzalloc(sizeof(*div), GFP_KERNEL);
404	if (!div)
405		return;
406
407	div->reg.ptr = reg;
408	div->shift = data->bit;
409	div->flags = div_data->flags;
410
411	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
412		div_flags |= CLKF_INDEX_POWER_OF_TWO;
413
414	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
415				      div_data->max_div, div_flags,
416				      div)) {
417		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
418		       node, offset, data->bit);
419		kfree(div);
420		return;
421	}
422
423	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
424				     data->bit, data->parents, 1,
425				     &ti_clk_divider_ops, clkctrl_name))
426		kfree(div);
427}
428
429static void __init
430_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
431			  struct device_node *node,
432			  const struct omap_clkctrl_reg_data *data,
433			  void __iomem *reg, const char *clkctrl_name)
434{
435	const struct omap_clkctrl_bit_data *bits = data->bit_data;
436
437	if (!bits)
438		return;
439
440	while (bits->bit) {
441		switch (bits->type) {
442		case TI_CLK_GATE:
443			_ti_clkctrl_setup_gate(provider, node, data->offset,
444					       bits, reg, clkctrl_name);
445			break;
446
447		case TI_CLK_DIVIDER:
448			_ti_clkctrl_setup_div(provider, node, data->offset,
449					      bits, reg, clkctrl_name);
450			break;
451
452		case TI_CLK_MUX:
453			_ti_clkctrl_setup_mux(provider, node, data->offset,
454					      bits, reg, clkctrl_name);
455			break;
456
457		default:
458			pr_err("%s: bad subclk type: %d\n", __func__,
459			       bits->type);
460			return;
461		}
462		bits++;
463	}
464}
465
466static void __init _clkctrl_add_provider(void *data,
467					 struct device_node *np)
468{
469	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
470}
471
472/* Get clock name based on compatible string for clkctrl */
473static char * __init clkctrl_get_name(struct device_node *np)
474{
475	struct property *prop;
476	const int prefix_len = 11;
477	const char *compat;
478	char *name;
479
480	of_property_for_each_string(np, "compatible", prop, compat) {
481		if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
482			/* Two letter minimum name length for l3, l4 etc */
483			if (strnlen(compat + prefix_len, 16) < 2)
484				continue;
485			name = kasprintf(GFP_KERNEL, "%s", compat + prefix_len);
486			if (!name)
487				continue;
488			strreplace(name, '-', '_');
489
490			return name;
491		}
492	}
493
494	return NULL;
495}
496
497static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
498{
499	struct omap_clkctrl_provider *provider;
500	const struct omap_clkctrl_data *data = default_clkctrl_data;
501	const struct omap_clkctrl_reg_data *reg_data;
502	struct clk_init_data init = { NULL };
503	struct clk_hw_omap *hw;
504	struct clk *clk;
505	struct omap_clkctrl_clk *clkctrl_clk = NULL;
506	const __be32 *addrp;
507	bool legacy_naming;
508	char *clkctrl_name;
509	u32 addr;
510	int ret;
511	char *c;
512	u16 soc_mask = 0;
513
514	if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
515	    of_node_name_eq(node, "clk"))
516		ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;
517
518	addrp = of_get_address(node, 0, NULL, NULL);
519	addr = (u32)of_translate_address(node, addrp);
520
521#ifdef CONFIG_ARCH_OMAP4
522	if (of_machine_is_compatible("ti,omap4"))
523		data = omap4_clkctrl_data;
524#endif
525#ifdef CONFIG_SOC_OMAP5
526	if (of_machine_is_compatible("ti,omap5"))
527		data = omap5_clkctrl_data;
528#endif
529#ifdef CONFIG_SOC_DRA7XX
530	if (of_machine_is_compatible("ti,dra7")) {
531		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
532			data = dra7_clkctrl_compat_data;
533		else
534			data = dra7_clkctrl_data;
535	}
536
537	if (of_machine_is_compatible("ti,dra72"))
538		soc_mask = CLKF_SOC_DRA72;
539	if (of_machine_is_compatible("ti,dra74"))
540		soc_mask = CLKF_SOC_DRA74;
541	if (of_machine_is_compatible("ti,dra76"))
542		soc_mask = CLKF_SOC_DRA76;
543#endif
544#ifdef CONFIG_SOC_AM33XX
545	if (of_machine_is_compatible("ti,am33xx")) {
546		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
547			data = am3_clkctrl_compat_data;
548		else
549			data = am3_clkctrl_data;
550	}
551#endif
552#ifdef CONFIG_SOC_AM43XX
553	if (of_machine_is_compatible("ti,am4372")) {
554		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
555			data = am4_clkctrl_compat_data;
556		else
557			data = am4_clkctrl_data;
558	}
559
560	if (of_machine_is_compatible("ti,am438x")) {
561		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
562			data = am438x_clkctrl_compat_data;
563		else
564			data = am438x_clkctrl_data;
565	}
566#endif
567#ifdef CONFIG_SOC_TI81XX
568	if (of_machine_is_compatible("ti,dm814"))
569		data = dm814_clkctrl_data;
570
571	if (of_machine_is_compatible("ti,dm816"))
572		data = dm816_clkctrl_data;
573#endif
574
575	if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
576		soc_mask |= CLKF_SOC_NONSEC;
577
578	while (data->addr) {
579		if (addr == data->addr)
580			break;
581
582		data++;
583	}
584
585	if (!data->addr) {
586		pr_err("%pOF not found from clkctrl data.\n", node);
587		return;
588	}
589
590	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
591	if (!provider)
592		return;
593
594	provider->base = of_iomap(node, 0);
595
596	legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
597	clkctrl_name = clkctrl_get_name(node);
598	if (clkctrl_name) {
599		provider->clkdm_name = kasprintf(GFP_KERNEL,
600						 "%s_clkdm", clkctrl_name);
601		goto clkdm_found;
602	}
603
604	/*
605	 * The code below can be removed when all clkctrl nodes use domain
606	 * specific compatible property and standard clock node naming
607	 */
608	if (legacy_naming) {
609		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
610		if (!provider->clkdm_name) {
611			kfree(provider);
612			return;
613		}
614
615		/*
616		 * Create default clkdm name, replace _cm from end of parent
617		 * node name with _clkdm
618		 */
619		provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
620	} else {
621		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
622		if (!provider->clkdm_name) {
623			kfree(provider);
624			return;
625		}
626
627		/*
628		 * Create default clkdm name, replace _clkctrl from end of
629		 * node name with _clkdm
630		 */
631		provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
632	}
633
634	strcat(provider->clkdm_name, "clkdm");
635
636	/* Replace any dash from the clkdm name with underscore */
637	c = provider->clkdm_name;
638
639	while (*c) {
640		if (*c == '-')
641			*c = '_';
642		c++;
643	}
644clkdm_found:
645	INIT_LIST_HEAD(&provider->clocks);
646
647	/* Generate clocks */
648	reg_data = data->regs;
649
650	while (reg_data->parent) {
651		if ((reg_data->flags & CLKF_SOC_MASK) &&
652		    (reg_data->flags & soc_mask) == 0) {
653			reg_data++;
654			continue;
655		}
656
657		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
658		if (!hw)
659			return;
660
661		hw->enable_reg.ptr = provider->base + reg_data->offset;
662
663		_ti_clkctrl_setup_subclks(provider, node, reg_data,
664					  hw->enable_reg.ptr, clkctrl_name);
665
666		if (reg_data->flags & CLKF_SW_SUP)
667			hw->enable_bit = MODULEMODE_SWCTRL;
668		if (reg_data->flags & CLKF_HW_SUP)
669			hw->enable_bit = MODULEMODE_HWCTRL;
670		if (reg_data->flags & CLKF_NO_IDLEST)
671			set_bit(NO_IDLEST, &hw->flags);
672
673		if (reg_data->clkdm_name)
674			hw->clkdm_name = reg_data->clkdm_name;
675		else
676			hw->clkdm_name = provider->clkdm_name;
677
678		init.parent_names = &reg_data->parent;
679		init.num_parents = 1;
680		init.flags = 0;
681		if (reg_data->flags & CLKF_SET_RATE_PARENT)
682			init.flags |= CLK_SET_RATE_PARENT;
683
684		init.name = clkctrl_get_clock_name(node, clkctrl_name,
685						   reg_data->offset, 0,
686						   legacy_naming);
687		if (!init.name)
688			goto cleanup;
689
690		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
691		if (!clkctrl_clk)
692			goto cleanup;
693
694		init.ops = &omap4_clkctrl_clk_ops;
695		hw->hw.init = &init;
696
697		clk = ti_clk_register_omap_hw(NULL, &hw->hw, init.name);
698		if (IS_ERR_OR_NULL(clk))
699			goto cleanup;
700
701		clkctrl_clk->reg_offset = reg_data->offset;
702		clkctrl_clk->clk = &hw->hw;
703
704		list_add(&clkctrl_clk->node, &provider->clocks);
705
706		reg_data++;
707	}
708
709	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
710	if (ret == -EPROBE_DEFER)
711		ti_clk_retry_init(node, provider, _clkctrl_add_provider);
712
713	kfree(clkctrl_name);
714
715	return;
716
717cleanup:
718	kfree(hw);
719	kfree(init.name);
720	kfree(clkctrl_name);
721	kfree(clkctrl_clk);
722}
723CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
724	       _ti_omap4_clkctrl_setup);
725
726/**
727 * ti_clk_is_in_standby - Check if clkctrl clock is in standby or not
728 * @clk: clock to check standby status for
729 *
730 * Finds whether the provided clock is in standby mode or not. Returns
731 * true if the provided clock is a clkctrl type clock and it is in standby,
732 * false otherwise.
733 */
734bool ti_clk_is_in_standby(struct clk *clk)
735{
736	struct clk_hw *hw;
737	struct clk_hw_omap *hwclk;
738	u32 val;
739
740	hw = __clk_get_hw(clk);
741
742	if (!omap2_clk_is_hw_omap(hw))
743		return false;
744
745	hwclk = to_clk_hw_omap(hw);
746
747	val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);
748
749	if (val & OMAP4_STBYST_MASK)
750		return true;
751
752	return false;
753}
754EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);