Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Ingenic SoCs TCU IRQ driver
  4 * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
  5 * Copyright (C) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
  6 */
  7
  8#include <linux/bitops.h>
  9#include <linux/clk.h>
 10#include <linux/clockchips.h>
 11#include <linux/clocksource.h>
 12#include <linux/cpuhotplug.h>
 13#include <linux/interrupt.h>
 14#include <linux/mfd/ingenic-tcu.h>
 15#include <linux/mfd/syscon.h>
 16#include <linux/of.h>
 
 17#include <linux/of_irq.h>
 
 18#include <linux/overflow.h>
 19#include <linux/platform_device.h>
 20#include <linux/regmap.h>
 21#include <linux/sched_clock.h>
 22
 23#include <dt-bindings/clock/ingenic,tcu.h>
 24
/* Per-CPU call-single data, used to run the event handler on the owning CPU. */
static DEFINE_PER_CPU(call_single_data_t, ingenic_cevt_csd);

/* Per-SoC data: the supported SoCs only differ in their channel count. */
struct ingenic_soc_info {
	unsigned int num_channels;
};

/*
 * One per-CPU clock-event timer.
 * @cpu: CPU this timer serves; also its index in ingenic_tcu.timers[]
 *	 (to_ingenic_tcu() relies on that).
 * @channel: TCU hardware channel driving this timer.
 * @cevt: the registered clock_event_device.
 * @clk: the channel's clock, from the TCU clock provider.
 * @name: backing storage for cevt.name and the IRQ name ("TCU%u").
 */
struct ingenic_tcu_timer {
	unsigned int cpu;
	unsigned int channel;
	struct clock_event_device cevt;
	struct clk *clk;
	char name[8];
};

/*
 * Driver state, allocated once at init.
 * @pwm_channels_mask: channels reserved for PWM; timer and clocksource
 *		       channels are taken from the zero bits of this mask.
 * @timers: flexible array with one entry per possible CPU.
 */
struct ingenic_tcu {
	struct regmap *map;
	struct device_node *np;
	struct clk *cs_clk;
	unsigned int cs_channel;
	struct clocksource cs;
	unsigned long pwm_channels_mask;
	struct ingenic_tcu_timer timers[];
};

/* Singleton, set in ingenic_tcu_init(); needed by the sched_clock read. */
static struct ingenic_tcu *ingenic_tcu;
 50
 51static u64 notrace ingenic_tcu_timer_read(void)
 52{
 53	struct ingenic_tcu *tcu = ingenic_tcu;
 54	unsigned int count;
 55
 56	regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count);
 57
 58	return count;
 59}
 60
/* Clocksource read callback; the TCU count does not depend on @cs. */
static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs)
{
	return ingenic_tcu_timer_read();
}
 65
/*
 * Map a timer back to its parent ingenic_tcu.  Relies on timer->cpu
 * matching the timer's index within the timers[] flexible array.
 */
static inline struct ingenic_tcu *
to_ingenic_tcu(struct ingenic_tcu_timer *timer)
{
	return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
}
 71
/* Map a clock_event_device back to its enclosing ingenic_tcu_timer. */
static inline struct ingenic_tcu_timer *
to_ingenic_tcu_timer(struct clock_event_device *evt)
{
	return container_of(evt, struct ingenic_tcu_timer, cevt);
}
 77
 78static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
 79{
 80	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
 81	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
 82
 83	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));
 84
 85	return 0;
 86}
 87
/*
 * Program a one-shot event @next ticks from now.  The TCU counters are
 * 16 bits wide, so larger deltas are rejected.  Write order matters:
 * set the full-reload value (TDFR), zero the counter (TCNT), then
 * enable the channel via TESR.
 */
static int ingenic_tcu_cevt_set_next(unsigned long next,
				     struct clock_event_device *evt)
{
	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

	if (next > 0xffff)
		return -EINVAL;

	regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
	regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
	regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));

	return 0;
}
103
104static void ingenic_per_cpu_event_handler(void *info)
105{
106	struct clock_event_device *cevt = (struct clock_event_device *) info;
107
108	cevt->event_handler(cevt);
109}
110
/*
 * Timer IRQ handler: stop the (one-shot) channel, then run the
 * clock-event handler on the CPU owning the timer through its per-CPU
 * CSD, since the IRQ may be taken on a different CPU.
 */
static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
{
	struct ingenic_tcu_timer *timer = dev_id;
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
	call_single_data_t *csd;

	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

	if (timer->cevt.event_handler) {
		csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
		csd->info = (void *) &timer->cevt;
		csd->func = ingenic_per_cpu_event_handler;
		smp_call_function_single_async(timer->cpu, csd);
	}

	return IRQ_HANDLED;
}
128
129static struct clk *ingenic_tcu_get_clock(struct device_node *np, int id)
130{
131	struct of_phandle_args args;
132
133	args.np = np;
134	args.args_count = 1;
135	args.args[0] = id;
136
137	return of_clk_get_from_provider(&args);
138}
139
/*
 * CPU-hotplug "online" callback: bring up the clock-event timer assigned
 * to @cpu.  Runs on the CPU being brought online (hence the
 * smp_processor_id() uses below).  Returns 0 or a negative errno; on
 * failure all acquired resources are released in reverse order.
 */
static int ingenic_tcu_setup_cevt(unsigned int cpu)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
	unsigned int timer_virq;
	struct irq_domain *domain;
	unsigned long rate;
	int err;

	timer->clk = ingenic_tcu_get_clock(tcu->np, timer->channel);
	if (IS_ERR(timer->clk))
		return PTR_ERR(timer->clk);

	err = clk_prepare_enable(timer->clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(timer->clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	/* The TCU node is also an IRQ domain; hwirq number == channel. */
	domain = irq_find_host(tcu->np);
	if (!domain) {
		err = -ENODEV;
		goto err_clk_disable;
	}

	timer_virq = irq_create_mapping(domain, timer->channel);
	if (!timer_virq) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	snprintf(timer->name, sizeof(timer->name), "TCU%u", timer->channel);

	err = request_irq(timer_virq, ingenic_tcu_cevt_cb, IRQF_TIMER,
			  timer->name, timer);
	if (err)
		goto err_irq_dispose_mapping;

	timer->cpu = smp_processor_id();
	timer->cevt.cpumask = cpumask_of(smp_processor_id());
	timer->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
	timer->cevt.name = timer->name;
	timer->cevt.rating = 200;
	timer->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
	timer->cevt.set_next_event = ingenic_tcu_cevt_set_next;

	/* 16-bit counter: min delta 10 ticks, max 0xffff. */
	clockevents_config_and_register(&timer->cevt, rate, 10, 0xffff);

	return 0;

err_irq_dispose_mapping:
	irq_dispose_mapping(timer_virq);
err_clk_disable:
	clk_disable_unprepare(timer->clk);
err_clk_put:
	clk_put(timer->clk);
	return err;
}
202
/*
 * Register a free-running 16-bit clocksource on tcu->cs_channel.  The
 * channel counts from 0 to 0xffff (TDFR) and wraps.  On failure the
 * clock is released in reverse order of acquisition.
 */
static int __init ingenic_tcu_clocksource_init(struct device_node *np,
					       struct ingenic_tcu *tcu)
{
	unsigned int channel = tcu->cs_channel;
	struct clocksource *cs = &tcu->cs;
	unsigned long rate;
	int err;

	tcu->cs_clk = ingenic_tcu_get_clock(np, channel);
	if (IS_ERR(tcu->cs_clk))
		return PTR_ERR(tcu->cs_clk);

	err = clk_prepare_enable(tcu->cs_clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(tcu->cs_clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	/* Reset channel: clear all writable TCSR bits. */
	regmap_update_bits(tcu->map, TCU_REG_TCSRc(channel),
			   0xffff & ~TCU_TCSR_RESERVED_BITS, 0);

	/* Reset counter: full 16-bit reload value, count from zero. */
	regmap_write(tcu->map, TCU_REG_TDFRc(channel), 0xffff);
	regmap_write(tcu->map, TCU_REG_TCNTc(channel), 0);

	/* Enable channel */
	regmap_write(tcu->map, TCU_REG_TESR, BIT(channel));

	cs->name = "ingenic-timer";
	cs->rating = 200;
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
	cs->mask = CLOCKSOURCE_MASK(16);
	cs->read = ingenic_tcu_timer_cs_read;

	err = clocksource_register_hz(cs, rate);
	if (err)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	clk_disable_unprepare(tcu->cs_clk);
err_clk_put:
	clk_put(tcu->cs_clk);
	return err;
}
254
/* JZ4740-class SoCs have 8 TCU channels. */
static const struct ingenic_soc_info jz4740_soc_info = {
	.num_channels = 8,
};

/* The JZ4725B only has 6 TCU channels. */
static const struct ingenic_soc_info jz4725b_soc_info = {
	.num_channels = 6,
};

static const struct of_device_id ingenic_tcu_of_match[] = {
	{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
	{ .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
	{ /* sentinel */ }
};
271
272static int __init ingenic_tcu_init(struct device_node *np)
273{
274	const struct of_device_id *id = of_match_node(ingenic_tcu_of_match, np);
275	const struct ingenic_soc_info *soc_info = id->data;
276	struct ingenic_tcu_timer *timer;
277	struct ingenic_tcu *tcu;
278	struct regmap *map;
279	unsigned int cpu;
280	int ret, last_bit = -1;
281	long rate;
282
283	of_node_clear_flag(np, OF_POPULATED);
284
285	map = device_node_to_regmap(np);
286	if (IS_ERR(map))
287		return PTR_ERR(map);
288
289	tcu = kzalloc(struct_size(tcu, timers, num_possible_cpus()),
290		      GFP_KERNEL);
291	if (!tcu)
292		return -ENOMEM;
293
294	/*
295	 * Enable all TCU channels for PWM use by default except channels 0/1,
296	 * and channel 2 if target CPU is JZ4780/X2000 and SMP is selected.
297	 */
298	tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1,
299					 num_possible_cpus() + 1);
300	of_property_read_u32(np, "ingenic,pwm-channels-mask",
301			     (u32 *)&tcu->pwm_channels_mask);
302
303	/* Verify that we have at least num_possible_cpus() + 1 free channels */
304	if (hweight8(tcu->pwm_channels_mask) >
305			soc_info->num_channels - num_possible_cpus() + 1) {
306		pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
307			tcu->pwm_channels_mask);
308		ret = -EINVAL;
309		goto err_free_ingenic_tcu;
310	}
311
312	tcu->map = map;
313	tcu->np = np;
314	ingenic_tcu = tcu;
315
316	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
317		timer = &tcu->timers[cpu];
318
319		timer->cpu = cpu;
320		timer->channel = find_next_zero_bit(&tcu->pwm_channels_mask,
321						  soc_info->num_channels,
322						  last_bit + 1);
323		last_bit = timer->channel;
324	}
325
326	tcu->cs_channel = find_next_zero_bit(&tcu->pwm_channels_mask,
327					     soc_info->num_channels,
328					     last_bit + 1);
329
330	ret = ingenic_tcu_clocksource_init(np, tcu);
331	if (ret) {
332		pr_crit("%s: Unable to init clocksource: %d\n", __func__, ret);
333		goto err_free_ingenic_tcu;
334	}
335
336	/* Setup clock events on each CPU core */
337	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "Ingenic XBurst: online",
338				ingenic_tcu_setup_cevt, NULL);
339	if (ret < 0) {
340		pr_crit("%s: Unable to start CPU timers: %d\n", __func__, ret);
341		goto err_tcu_clocksource_cleanup;
342	}
343
344	/* Register the sched_clock at the end as there's no way to undo it */
345	rate = clk_get_rate(tcu->cs_clk);
346	sched_clock_register(ingenic_tcu_timer_read, 16, rate);
347
348	return 0;
349
350err_tcu_clocksource_cleanup:
351	clocksource_unregister(&tcu->cs);
352	clk_disable_unprepare(tcu->cs_clk);
353	clk_put(tcu->cs_clk);
354err_free_ingenic_tcu:
355	kfree(tcu);
356	return ret;
357}
358
/* Early OF timer registration: one entry per supported compatible. */
TIMER_OF_DECLARE(jz4740_tcu_intc,  "ingenic,jz4740-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4760_tcu_intc,  "ingenic,jz4760-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc,  "ingenic,jz4770-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(x1000_tcu_intc,  "ingenic,x1000-tcu",  ingenic_tcu_init);
364
/*
 * Platform probe: the real setup already happened in ingenic_tcu_init();
 * this only attaches the singleton state so the PM callbacks can reach it.
 */
static int __init ingenic_tcu_probe(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, ingenic_tcu);

	return 0;
}
371
/*
 * noirq suspend: gate (but keep prepared) the clocksource and per-CPU
 * timer clocks.
 * NOTE(review): iterating 0..num_online_cpus()-1 assumes online CPU ids
 * are contiguous from 0 - confirm for the SoCs this driver supports.
 */
static int ingenic_tcu_suspend(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;

	clk_disable(tcu->cs_clk);

	for (cpu = 0; cpu < num_online_cpus(); cpu++)
		clk_disable(tcu->timers[cpu].clk);

	return 0;
}
384
/*
 * noirq resume: ungate the per-CPU timer clocks, then the clocksource
 * clock; on failure, re-gate whatever was enabled so far.
 * (Same contiguous-CPU-id assumption as the suspend path.)
 */
static int ingenic_tcu_resume(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;
	int ret;

	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
		ret = clk_enable(tcu->timers[cpu].clk);
		if (ret)
			goto err_timer_clk_disable;
	}

	ret = clk_enable(tcu->cs_clk);
	if (ret)
		goto err_timer_clk_disable;

	return 0;

err_timer_clk_disable:
	/* Unwind: disables the timer clocks for CPUs [0, cpu). */
	for (; cpu > 0; cpu--)
		clk_disable(tcu->timers[cpu - 1].clk);
	return ret;
}
408
/* System sleep ops for the TCU clocks (see comment on the _noirq choice). */
static const struct dev_pm_ops ingenic_tcu_pm_ops = {
	/* _noirq: We want the TCU clocks to be gated last / ungated first */
	.suspend_noirq = ingenic_tcu_suspend,
	.resume_noirq  = ingenic_tcu_resume,
};
414
/*
 * Platform driver so the device participates in system PM; the probe
 * only runs once at boot (builtin_platform_driver_probe).
 */
static struct platform_driver ingenic_tcu_driver = {
	.driver = {
		.name	= "ingenic-tcu-timer",
		.pm	= pm_sleep_ptr(&ingenic_tcu_pm_ops),


		.of_match_table = ingenic_tcu_of_match,
	},
};
builtin_platform_driver_probe(ingenic_tcu_driver, ingenic_tcu_probe);
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Ingenic SoCs TCU IRQ driver
  4 * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
  5 * Copyright (C) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
  6 */
  7
  8#include <linux/bitops.h>
  9#include <linux/clk.h>
 10#include <linux/clockchips.h>
 11#include <linux/clocksource.h>
 
 12#include <linux/interrupt.h>
 13#include <linux/mfd/ingenic-tcu.h>
 14#include <linux/mfd/syscon.h>
 15#include <linux/of.h>
 16#include <linux/of_address.h>
 17#include <linux/of_irq.h>
 18#include <linux/of_platform.h>
 19#include <linux/overflow.h>
 20#include <linux/platform_device.h>
 21#include <linux/regmap.h>
 22#include <linux/sched_clock.h>
 23
 24#include <dt-bindings/clock/ingenic,tcu.h>
 25
/* Per-CPU call-single data, used to run the event handler on the owning CPU. */
static DEFINE_PER_CPU(call_single_data_t, ingenic_cevt_csd);

/* Per-SoC data: the supported SoCs only differ in their channel count. */
struct ingenic_soc_info {
	unsigned int num_channels;
};

/*
 * One per-CPU clock-event timer.
 * @cpu: CPU this timer serves; also its index in ingenic_tcu.timers[]
 *	 (to_ingenic_tcu() relies on that).
 * @channel: TCU hardware channel driving this timer.
 * @cevt: the registered clock_event_device.
 * @clk: the channel's clock, from the TCU clock provider.
 * @name: backing storage for cevt.name and the IRQ name ("TCU%u").
 */
struct ingenic_tcu_timer {
	unsigned int cpu;
	unsigned int channel;
	struct clock_event_device cevt;
	struct clk *clk;
	char name[8];
};

/*
 * Driver state, allocated once at init.
 * @pwm_channels_mask: channels reserved for PWM; timer and clocksource
 *		       channels are taken from the zero bits of this mask.
 * @timers: flexible array with one entry per possible CPU.
 */
struct ingenic_tcu {
	struct regmap *map;
	struct device_node *np;
	struct clk *cs_clk;
	unsigned int cs_channel;
	struct clocksource cs;
	unsigned long pwm_channels_mask;
	struct ingenic_tcu_timer timers[];
};

/* Singleton, set in ingenic_tcu_init(); needed by the sched_clock read. */
static struct ingenic_tcu *ingenic_tcu;
 51
/*
 * Read the current count of the clocksource channel.  Registered as the
 * sched_clock read callback, hence notrace.
 * NOTE(review): if regmap_read() ever failed, @count would be returned
 * uninitialized - this relies on the MMIO regmap read never failing.
 */
static u64 notrace ingenic_tcu_timer_read(void)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	unsigned int count;

	regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count);

	return count;
}

/* Clocksource read callback; the TCU count does not depend on @cs. */
static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs)
{
	return ingenic_tcu_timer_read();
}

/*
 * Map a timer back to its parent ingenic_tcu.  Relies on timer->cpu
 * matching the timer's index within the timers[] flexible array.
 */
static inline struct ingenic_tcu *
to_ingenic_tcu(struct ingenic_tcu_timer *timer)
{
	return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
}

/* Map a clock_event_device back to its enclosing ingenic_tcu_timer. */
static inline struct ingenic_tcu_timer *
to_ingenic_tcu_timer(struct clock_event_device *evt)
{
	return container_of(evt, struct ingenic_tcu_timer, cevt);
}
 78
/* Clockevent shutdown: writing the channel's bit to TECR stops counting. */
static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
{
	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

	return 0;
}

/*
 * Program a one-shot event @next ticks from now.  The TCU counters are
 * 16 bits wide, so larger deltas are rejected.  Write order matters:
 * set the full-reload value (TDFR), zero the counter (TCNT), then
 * enable the channel via TESR.
 */
static int ingenic_tcu_cevt_set_next(unsigned long next,
				     struct clock_event_device *evt)
{
	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

	if (next > 0xffff)
		return -EINVAL;

	regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
	regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
	regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));

	return 0;
}

/* Runs on the timer's own CPU, kicked via smp_call_function_single_async(). */
static void ingenic_per_cpu_event_handler(void *info)
{
	struct clock_event_device *cevt = (struct clock_event_device *) info;

	cevt->event_handler(cevt);
}

/*
 * Timer IRQ handler: stop the (one-shot) channel, then run the
 * clock-event handler on the CPU owning the timer through its per-CPU
 * CSD, since the IRQ may be taken on a different CPU.
 */
static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
{
	struct ingenic_tcu_timer *timer = dev_id;
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
	call_single_data_t *csd;

	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

	if (timer->cevt.event_handler) {
		csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
		csd->info = (void *) &timer->cevt;
		csd->func = ingenic_per_cpu_event_handler;
		smp_call_function_single_async(timer->cpu, csd);
	}

	return IRQ_HANDLED;
}
129
130static struct clk *ingenic_tcu_get_clock(struct device_node *np, int id)
131{
132	struct of_phandle_args args;
133
134	args.np = np;
135	args.args_count = 1;
136	args.args[0] = id;
137
138	return of_clk_get_from_provider(&args);
139}
140
/*
 * CPU-hotplug "online" callback: bring up the clock-event timer assigned
 * to @cpu.  Runs on the CPU being brought online (hence the
 * smp_processor_id() uses below).  Returns 0 or a negative errno; on
 * failure all acquired resources are released in reverse order.
 */
static int ingenic_tcu_setup_cevt(unsigned int cpu)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
	unsigned int timer_virq;
	struct irq_domain *domain;
	unsigned long rate;
	int err;

	timer->clk = ingenic_tcu_get_clock(tcu->np, timer->channel);
	if (IS_ERR(timer->clk))
		return PTR_ERR(timer->clk);

	err = clk_prepare_enable(timer->clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(timer->clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	/* The TCU node is also an IRQ domain; hwirq number == channel. */
	domain = irq_find_host(tcu->np);
	if (!domain) {
		err = -ENODEV;
		goto err_clk_disable;
	}

	timer_virq = irq_create_mapping(domain, timer->channel);
	if (!timer_virq) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	snprintf(timer->name, sizeof(timer->name), "TCU%u", timer->channel);

	err = request_irq(timer_virq, ingenic_tcu_cevt_cb, IRQF_TIMER,
			  timer->name, timer);
	if (err)
		goto err_irq_dispose_mapping;

	timer->cpu = smp_processor_id();
	timer->cevt.cpumask = cpumask_of(smp_processor_id());
	timer->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
	timer->cevt.name = timer->name;
	timer->cevt.rating = 200;
	timer->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
	timer->cevt.set_next_event = ingenic_tcu_cevt_set_next;

	/* 16-bit counter: min delta 10 ticks, max 0xffff. */
	clockevents_config_and_register(&timer->cevt, rate, 10, 0xffff);

	return 0;

err_irq_dispose_mapping:
	irq_dispose_mapping(timer_virq);
err_clk_disable:
	clk_disable_unprepare(timer->clk);
err_clk_put:
	clk_put(timer->clk);
	return err;
}
203
/*
 * Register a free-running 16-bit clocksource on tcu->cs_channel.  The
 * channel counts from 0 to 0xffff (TDFR) and wraps.  On failure the
 * clock is released in reverse order of acquisition.
 */
static int __init ingenic_tcu_clocksource_init(struct device_node *np,
					       struct ingenic_tcu *tcu)
{
	unsigned int channel = tcu->cs_channel;
	struct clocksource *cs = &tcu->cs;
	unsigned long rate;
	int err;

	tcu->cs_clk = ingenic_tcu_get_clock(np, channel);
	if (IS_ERR(tcu->cs_clk))
		return PTR_ERR(tcu->cs_clk);

	err = clk_prepare_enable(tcu->cs_clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(tcu->cs_clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	/* Reset channel: clear all writable TCSR bits. */
	regmap_update_bits(tcu->map, TCU_REG_TCSRc(channel),
			   0xffff & ~TCU_TCSR_RESERVED_BITS, 0);

	/* Reset counter: full 16-bit reload value, count from zero. */
	regmap_write(tcu->map, TCU_REG_TDFRc(channel), 0xffff);
	regmap_write(tcu->map, TCU_REG_TCNTc(channel), 0);

	/* Enable channel */
	regmap_write(tcu->map, TCU_REG_TESR, BIT(channel));

	cs->name = "ingenic-timer";
	cs->rating = 200;
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
	cs->mask = CLOCKSOURCE_MASK(16);
	cs->read = ingenic_tcu_timer_cs_read;

	err = clocksource_register_hz(cs, rate);
	if (err)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	clk_disable_unprepare(tcu->cs_clk);
err_clk_put:
	clk_put(tcu->cs_clk);
	return err;
}
255
/* JZ4740-class SoCs have 8 TCU channels. */
static const struct ingenic_soc_info jz4740_soc_info = {
	.num_channels = 8,
};

/* The JZ4725B only has 6 TCU channels. */
static const struct ingenic_soc_info jz4725b_soc_info = {
	.num_channels = 6,
};

static const struct of_device_id ingenic_tcu_of_match[] = {
	{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
	{ .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
	{ /* sentinel */ }
};
272
273static int __init ingenic_tcu_init(struct device_node *np)
274{
275	const struct of_device_id *id = of_match_node(ingenic_tcu_of_match, np);
276	const struct ingenic_soc_info *soc_info = id->data;
277	struct ingenic_tcu_timer *timer;
278	struct ingenic_tcu *tcu;
279	struct regmap *map;
280	unsigned int cpu;
281	int ret, last_bit = -1;
282	long rate;
283
284	of_node_clear_flag(np, OF_POPULATED);
285
286	map = device_node_to_regmap(np);
287	if (IS_ERR(map))
288		return PTR_ERR(map);
289
290	tcu = kzalloc(struct_size(tcu, timers, num_possible_cpus()),
291		      GFP_KERNEL);
292	if (!tcu)
293		return -ENOMEM;
294
295	/*
296	 * Enable all TCU channels for PWM use by default except channels 0/1,
297	 * and channel 2 if target CPU is JZ4780/X2000 and SMP is selected.
298	 */
299	tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1,
300					 num_possible_cpus() + 1);
301	of_property_read_u32(np, "ingenic,pwm-channels-mask",
302			     (u32 *)&tcu->pwm_channels_mask);
303
304	/* Verify that we have at least num_possible_cpus() + 1 free channels */
305	if (hweight8(tcu->pwm_channels_mask) >
306			soc_info->num_channels - num_possible_cpus() + 1) {
307		pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
308			tcu->pwm_channels_mask);
309		ret = -EINVAL;
310		goto err_free_ingenic_tcu;
311	}
312
313	tcu->map = map;
314	tcu->np = np;
315	ingenic_tcu = tcu;
316
317	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
318		timer = &tcu->timers[cpu];
319
320		timer->cpu = cpu;
321		timer->channel = find_next_zero_bit(&tcu->pwm_channels_mask,
322						  soc_info->num_channels,
323						  last_bit + 1);
324		last_bit = timer->channel;
325	}
326
327	tcu->cs_channel = find_next_zero_bit(&tcu->pwm_channels_mask,
328					     soc_info->num_channels,
329					     last_bit + 1);
330
331	ret = ingenic_tcu_clocksource_init(np, tcu);
332	if (ret) {
333		pr_crit("%s: Unable to init clocksource: %d\n", __func__, ret);
334		goto err_free_ingenic_tcu;
335	}
336
337	/* Setup clock events on each CPU core */
338	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "Ingenic XBurst: online",
339				ingenic_tcu_setup_cevt, NULL);
340	if (ret < 0) {
341		pr_crit("%s: Unable to start CPU timers: %d\n", __func__, ret);
342		goto err_tcu_clocksource_cleanup;
343	}
344
345	/* Register the sched_clock at the end as there's no way to undo it */
346	rate = clk_get_rate(tcu->cs_clk);
347	sched_clock_register(ingenic_tcu_timer_read, 16, rate);
348
349	return 0;
350
351err_tcu_clocksource_cleanup:
352	clocksource_unregister(&tcu->cs);
353	clk_disable_unprepare(tcu->cs_clk);
354	clk_put(tcu->cs_clk);
355err_free_ingenic_tcu:
356	kfree(tcu);
357	return ret;
358}
359
/* Early OF timer registration: one entry per supported compatible. */
TIMER_OF_DECLARE(jz4740_tcu_intc,  "ingenic,jz4740-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4760_tcu_intc,  "ingenic,jz4760-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc,  "ingenic,jz4770-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(x1000_tcu_intc,  "ingenic,x1000-tcu",  ingenic_tcu_init);
365
/*
 * Platform probe: the real setup already happened in ingenic_tcu_init();
 * this only attaches the singleton state so the PM callbacks can reach it.
 */
static int __init ingenic_tcu_probe(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, ingenic_tcu);

	return 0;
}

/*
 * noirq suspend: gate (but keep prepared) the clocksource and per-CPU
 * timer clocks.
 * NOTE(review): iterating 0..num_online_cpus()-1 assumes online CPU ids
 * are contiguous from 0 - confirm for the SoCs this driver supports.
 */
static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;

	clk_disable(tcu->cs_clk);

	for (cpu = 0; cpu < num_online_cpus(); cpu++)
		clk_disable(tcu->timers[cpu].clk);

	return 0;
}
385
/*
 * noirq resume: ungate the per-CPU timer clocks, then the clocksource
 * clock; on failure, re-gate whatever was enabled so far.
 * (Same contiguous-CPU-id assumption as the suspend path.)
 */
static int __maybe_unused ingenic_tcu_resume(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;
	int ret;

	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
		ret = clk_enable(tcu->timers[cpu].clk);
		if (ret)
			goto err_timer_clk_disable;
	}

	ret = clk_enable(tcu->cs_clk);
	if (ret)
		goto err_timer_clk_disable;

	return 0;

err_timer_clk_disable:
	/* Unwind: disables the timer clocks for CPUs [0, cpu). */
	for (; cpu > 0; cpu--)
		clk_disable(tcu->timers[cpu - 1].clk);
	return ret;
}

/* System sleep ops for the TCU clocks (see comment on the _noirq choice). */
static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {
	/* _noirq: We want the TCU clocks to be gated last / ungated first */
	.suspend_noirq = ingenic_tcu_suspend,
	.resume_noirq  = ingenic_tcu_resume,
};
415
/*
 * Platform driver so the device participates in system PM; the probe
 * only runs once at boot (builtin_platform_driver_probe).
 */
static struct platform_driver ingenic_tcu_driver = {
	.driver = {
		.name	= "ingenic-tcu-timer",
#ifdef CONFIG_PM_SLEEP
		.pm	= &ingenic_tcu_pm_ops,
#endif
		.of_match_table = ingenic_tcu_of_match,
	},
};
builtin_platform_driver_probe(ingenic_tcu_driver, ingenic_tcu_probe);