// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Alibaba Group Holding Limited.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Status Register */
#define TH_1520_MBOX_STA 0x0
#define TH_1520_MBOX_CLR 0x4
#define TH_1520_MBOX_MASK 0xc

/* Transmit/receive data register:
 * INFO0 ~ INFO6
 */
#define TH_1520_MBOX_INFO_NUM 8
#define TH_1520_MBOX_DATA_INFO_NUM 7
#define TH_1520_MBOX_INFO0 0x14
/* Transmit ack register: INFO7 */
#define TH_1520_MBOX_INFO7 0x30

/* Generate remote icu IRQ Register */
#define TH_1520_MBOX_GEN 0x10
#define TH_1520_MBOX_GEN_RX_DATA BIT(6)
#define TH_1520_MBOX_GEN_TX_ACK BIT(7)

#define TH_1520_MBOX_CHAN_RES_SIZE 0x1000
#define TH_1520_MBOX_CHANS 4
#define TH_1520_MBOX_CHAN_NAME_SIZE 20

#define TH_1520_MBOX_ACK_MAGIC 0xdeadbeaf

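/*
 * Message handshake, as implemented below: the sender writes a 7-word
 * payload into the remote ICU's INFO0~INFO6 (INFO0 doubles as the
 * protocol word and must be non-zero) and sets GEN_RX_DATA to raise the
 * remote IRQ. The receiver reads the words, clears its local INFO0,
 * writes TH_1520_MBOX_ACK_MAGIC into the sender's INFO7 and, unless the
 * remote core polls INFO7, sets GEN_TX_ACK to interrupt the sender.
 */
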
#ifdef CONFIG_PM_SLEEP
/* store MBOX context across system-wide suspend/resume transitions */
struct th1520_mbox_context {
	u32 intr_mask[TH_1520_MBOX_CHANS];
};
#endif

enum th1520_mbox_icu_cpu_id {
	TH_1520_MBOX_ICU_KERNEL_CPU0, /* 910T */
	TH_1520_MBOX_ICU_CPU1, /* 902 */
	TH_1520_MBOX_ICU_CPU2, /* 906 */
	TH_1520_MBOX_ICU_CPU3, /* 910R */
};

struct th1520_mbox_con_priv {
	enum th1520_mbox_icu_cpu_id idx;
	void __iomem *comm_local_base;
	void __iomem *comm_remote_base;
	char irq_desc[TH_1520_MBOX_CHAN_NAME_SIZE];
	struct mbox_chan *chan;
};

struct th1520_mbox_priv {
	struct device *dev;
	void __iomem *local_icu[TH_1520_MBOX_CHANS];
	void __iomem *remote_icu[TH_1520_MBOX_CHANS - 1];
	void __iomem *cur_cpu_ch_base;
	spinlock_t mbox_lock; /* control register lock */

	struct mbox_controller mbox;
	struct mbox_chan mbox_chans[TH_1520_MBOX_CHANS];
	struct clk_bulk_data clocks[TH_1520_MBOX_CHANS];
	struct th1520_mbox_con_priv con_priv[TH_1520_MBOX_CHANS];
	int irq;
#ifdef CONFIG_PM_SLEEP
	struct th1520_mbox_context *ctx;
#endif
};

static struct th1520_mbox_priv *
to_th1520_mbox_priv(struct mbox_controller *mbox)
{
	return container_of(mbox, struct th1520_mbox_priv, mbox);
}

static void th1520_mbox_write(struct th1520_mbox_priv *priv, u32 val, u32 offs)
{
	iowrite32(val, priv->cur_cpu_ch_base + offs);
}

static u32 th1520_mbox_read(struct th1520_mbox_priv *priv, u32 offs)
{
	return ioread32(priv->cur_cpu_ch_base + offs);
}

static u32 th1520_mbox_rmw(struct th1520_mbox_priv *priv, u32 off, u32 set,
			   u32 clr)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->mbox_lock, flags);
	val = th1520_mbox_read(priv, off);
	val &= ~clr;
	val |= set;
	th1520_mbox_write(priv, val, off);
	spin_unlock_irqrestore(&priv->mbox_lock, flags);

	return val;
}

static void th1520_mbox_chan_write(struct th1520_mbox_con_priv *cp, u32 val,
				   u32 offs, bool is_remote)
{
	if (is_remote)
		iowrite32(val, cp->comm_remote_base + offs);
	else
		iowrite32(val, cp->comm_local_base + offs);
}

static u32 th1520_mbox_chan_read(struct th1520_mbox_con_priv *cp, u32 offs,
				 bool is_remote)
{
	if (is_remote)
		return ioread32(cp->comm_remote_base + offs);
	else
		return ioread32(cp->comm_local_base + offs);
}

static void th1520_mbox_chan_rmw(struct th1520_mbox_con_priv *cp, u32 off,
				 u32 set, u32 clr, bool is_remote)
{
	struct th1520_mbox_priv *priv = to_th1520_mbox_priv(cp->chan->mbox);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->mbox_lock, flags);
	val = th1520_mbox_chan_read(cp, off, is_remote);
	val &= ~clr;
	val |= set;
	th1520_mbox_chan_write(cp, val, off, is_remote);
	spin_unlock_irqrestore(&priv->mbox_lock, flags);
}

static void th1520_mbox_chan_rd_data(struct th1520_mbox_con_priv *cp,
				     void *data, bool is_remote)
{
	u32 off = TH_1520_MBOX_INFO0;
	u32 *arg = data;
	u32 i;

	/*
	 * Read info0 ~ info6, 28 bytes in total; the destination buffer
	 * must be at least 28 bytes.
	 */
	for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) {
		*arg = th1520_mbox_chan_read(cp, off, is_remote);
		off += 4;
		arg++;
	}
}

static void th1520_mbox_chan_wr_data(struct th1520_mbox_con_priv *cp,
				     void *data, bool is_remote)
{
	u32 off = TH_1520_MBOX_INFO0;
	u32 *arg = data;
	u32 i;

	/*
	 * Write info0 ~ info6, 28 bytes in total; the source buffer must
	 * hold 28 bytes of valid data.
	 */
	for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) {
		th1520_mbox_chan_write(cp, *arg, off, is_remote);
		off += 4;
		arg++;
	}
}

static void th1520_mbox_chan_wr_ack(struct th1520_mbox_con_priv *cp, void *data,
				    bool is_remote)
{
	u32 off = TH_1520_MBOX_INFO7;
	u32 *arg = data;

	th1520_mbox_chan_write(cp, *arg, off, is_remote);
}

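/*
 * Map a channel index onto its bit position in the STA/CLR/MASK
 * registers: the local core (KERNEL_CPU0) has no bit of its own, so
 * the remote cores occupy consecutive bits starting at 0.
 */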
static int th1520_mbox_chan_id_to_mapbit(struct th1520_mbox_con_priv *cp)
{
	int mapbit = 0;
	int i;

	for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
		if (i == cp->idx)
			return mapbit;

		if (i != TH_1520_MBOX_ICU_KERNEL_CPU0)
			mapbit++;
	}

	if (i == TH_1520_MBOX_CHANS)
		dev_err(cp->chan->mbox->dev, "convert to mapbit failed\n");

	return 0;
}

static irqreturn_t th1520_mbox_isr(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
	struct th1520_mbox_con_priv *cp = chan->con_priv;
	int mapbit = th1520_mbox_chan_id_to_mapbit(cp);
	u32 sta, dat[TH_1520_MBOX_DATA_INFO_NUM];
	u32 ack_magic = TH_1520_MBOX_ACK_MAGIC;
	u32 info0_data, info7_data;

	sta = th1520_mbox_read(priv, TH_1520_MBOX_STA);
	if (!(sta & BIT(mapbit)))
		return IRQ_NONE;

	/* clear chan irq bit in STA register */
	th1520_mbox_rmw(priv, TH_1520_MBOX_CLR, BIT(mapbit), 0);

	/* info0 is the protocol word, should not be zero! */
	info0_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO0, false);
	if (info0_data) {
		/* read info0~info6 data */
		th1520_mbox_chan_rd_data(cp, dat, false);

		/* clear local info0 */
		th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO0, false);

		/* notify remote cpu */
		th1520_mbox_chan_wr_ack(cp, &ack_magic, true);
		/* CPU1 (902) and CPU2 (906) poll info7, so skip the ack IRQ */
		if (cp->idx != TH_1520_MBOX_ICU_CPU1 &&
		    cp->idx != TH_1520_MBOX_ICU_CPU2)
			th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN,
					     TH_1520_MBOX_GEN_TX_ACK, 0, true);

		/* transfer the data to client */
		mbox_chan_received_data(chan, (void *)dat);
	}

	/* the magic value in info7 is the real ack signal, not GEN bit 7 */
	info7_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO7, false);
	if (info7_data == TH_1520_MBOX_ACK_MAGIC) {
		/* clear local info7 */
		th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO7, false);

		/* notify framework the last TX has completed */
		mbox_chan_txdone(chan, 0);
	}

	if (!info0_data && !info7_data)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

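/*
 * Copy the 7-word message into the remote ICU's INFO registers, then
 * raise the RX_DATA doorbell in the remote GEN register; TX completion
 * is reported later from the ISR via mbox_chan_txdone().
 */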
static int th1520_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct th1520_mbox_con_priv *cp = chan->con_priv;

	th1520_mbox_chan_wr_data(cp, data, true);
	th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, TH_1520_MBOX_GEN_RX_DATA, 0,
			     true);
	return 0;
}

static int th1520_mbox_startup(struct mbox_chan *chan)
{
	struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
	struct th1520_mbox_con_priv *cp = chan->con_priv;
	u32 data[8] = {};
	int mask_bit;
	int ret;

	/* clear local and remote generate and info0~info7 */
	th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, true);
	th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, false);
	th1520_mbox_chan_wr_ack(cp, &data[7], true);
	th1520_mbox_chan_wr_ack(cp, &data[7], false);
	th1520_mbox_chan_wr_data(cp, &data[0], true);
	th1520_mbox_chan_wr_data(cp, &data[0], false);

	/* enable the chan mask */
	mask_bit = th1520_mbox_chan_id_to_mapbit(cp);
	th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, BIT(mask_bit), 0);

	/*
	 * Mixing devm_ managed resources with manual IRQ handling is generally
	 * discouraged due to potential complexities with resource management,
	 * especially when dealing with shared interrupts. However, in this case,
	 * the approach is safe and effective because:
	 *
	 * 1. Each mailbox channel requests its IRQ within the .startup() callback
	 *    and frees it within the .shutdown() callback.
	 * 2. During device unbinding, the devm_ managed mailbox controller first
	 *    iterates through all channels, ensuring that their IRQs are freed before
	 *    any other devm_ resources are released.
	 *
	 * This ordering guarantees that no interrupts can be triggered from the device
	 * while it is being unbound, preventing race conditions and ensuring system
	 * stability.
	 */
	ret = request_irq(priv->irq, th1520_mbox_isr,
			  IRQF_SHARED | IRQF_NO_SUSPEND, cp->irq_desc, chan);
	if (ret) {
		dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq);
		return ret;
	}

	return 0;
}

static void th1520_mbox_shutdown(struct mbox_chan *chan)
{
	struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
	struct th1520_mbox_con_priv *cp = chan->con_priv;
	int mask_bit;

	free_irq(priv->irq, chan);

	/* clear the chan mask */
	mask_bit = th1520_mbox_chan_id_to_mapbit(cp);
	th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, 0, BIT(mask_bit));
}

static const struct mbox_chan_ops th1520_mbox_ops = {
	.send_data = th1520_mbox_send_data,
	.startup = th1520_mbox_startup,
	.shutdown = th1520_mbox_shutdown,
};

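/*
 * A minimal client sketch (illustrative, not part of this driver): a
 * consumer embeds a struct mbox_client with .rx_callback set, obtains a
 * channel with mbox_request_channel(), and transmits a 7 x u32 buffer
 * with mbox_send_message(); the rx_callback then receives the 7-word
 * payload read out in the ISR above.
 */
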
static int th1520_mbox_init_generic(struct th1520_mbox_priv *priv)
{
#ifdef CONFIG_PM_SLEEP
	priv->ctx = devm_kzalloc(priv->dev, sizeof(*priv->ctx), GFP_KERNEL);
	if (!priv->ctx)
		return -ENOMEM;
#endif
	/* Set default configuration */
	th1520_mbox_write(priv, 0xff, TH_1520_MBOX_CLR);
	th1520_mbox_write(priv, 0x0, TH_1520_MBOX_MASK);
	return 0;
}

static struct mbox_chan *th1520_mbox_xlate(struct mbox_controller *mbox,
					   const struct of_phandle_args *sp)
{
	u32 chan;

	if (sp->args_count != 1) {
		dev_err(mbox->dev, "Invalid argument count %d\n",
			sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	chan = sp->args[0]; /* comm remote channel */

	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d\n", chan);
		return ERR_PTR(-EINVAL);
	}

	if (chan == TH_1520_MBOX_ICU_KERNEL_CPU0) {
		dev_err(mbox->dev, "Cannot communicate with yourself\n");
		return ERR_PTR(-EINVAL);
	}

	return &mbox->chans[chan];
}

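/*
 * Example consumer wiring in the device tree (a sketch; node and label
 * names are illustrative). The single cell selects the remote channel
 * index checked in th1520_mbox_xlate() above, so it must not be 0 (the
 * local core):
 *
 *	mbox: mailbox@... {
 *		compatible = "thead,th1520-mbox";
 *		#mbox-cells = <1>;
 *	};
 *
 *	client {
 *		mboxes = <&mbox 1>;	// 1 = TH_1520_MBOX_ICU_CPU1
 *	};
 */
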
static void __iomem *th1520_map_mmio(struct platform_device *pdev,
				     char *res_name, size_t offset)
{
	void __iomem *mapped;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);

	if (!res) {
		dev_err(&pdev->dev, "Failed to get resource: %s\n", res_name);
		return ERR_PTR(-EINVAL);
	}

	mapped = devm_ioremap(&pdev->dev, res->start + offset,
			      resource_size(res) - offset);
	if (!mapped) {
		dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name);
		return ERR_PTR(-ENOMEM);
	}

	return mapped;
}

static void th1520_disable_clk(void *data)
{
	struct th1520_mbox_priv *priv = data;

	clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
}

static int th1520_mbox_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct th1520_mbox_priv *priv;
	unsigned int remote_idx = 0;
	unsigned int i;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	priv->clocks[0].id = "clk-local";
	priv->clocks[1].id = "clk-remote-icu0";
	priv->clocks[2].id = "clk-remote-icu1";
	priv->clocks[3].id = "clk-remote-icu2";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks),
				priv->clocks);
	if (ret) {
		dev_err(dev, "Failed to get clocks\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
	if (ret) {
		dev_err(dev, "Failed to enable clocks\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv);
	if (ret) {
		clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
		return ret;
	}

	/*
	 * The address mappings in the device tree align precisely with those
	 * outlined in the manual. However, register offsets within these
	 * mapped regions are irregular, particularly for remote-icu0.
	 * Consequently, th1520_map_mmio() requires an additional parameter to
	 * handle this quirk.
	 */
	priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] =
		th1520_map_mmio(pdev, "local", 0x0);
	if (IS_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]))
		return PTR_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]);

	priv->remote_icu[0] = th1520_map_mmio(pdev, "remote-icu0", 0x4000);
	if (IS_ERR(priv->remote_icu[0]))
		return PTR_ERR(priv->remote_icu[0]);

	priv->remote_icu[1] = th1520_map_mmio(pdev, "remote-icu1", 0x0);
	if (IS_ERR(priv->remote_icu[1]))
		return PTR_ERR(priv->remote_icu[1]);

	priv->remote_icu[2] = th1520_map_mmio(pdev, "remote-icu2", 0x0);
	if (IS_ERR(priv->remote_icu[2]))
		return PTR_ERR(priv->remote_icu[2]);

	priv->local_icu[TH_1520_MBOX_ICU_CPU1] =
		priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] +
		TH_1520_MBOX_CHAN_RES_SIZE;
	priv->local_icu[TH_1520_MBOX_ICU_CPU2] =
		priv->local_icu[TH_1520_MBOX_ICU_CPU1] +
		TH_1520_MBOX_CHAN_RES_SIZE;
	priv->local_icu[TH_1520_MBOX_ICU_CPU3] =
		priv->local_icu[TH_1520_MBOX_ICU_CPU2] +
		TH_1520_MBOX_CHAN_RES_SIZE;

	priv->cur_cpu_ch_base = priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0];

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	/* init the chans */
	for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
		struct th1520_mbox_con_priv *cp = &priv->con_priv[i];

		cp->idx = i;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "th1520_mbox_chan[%i]", cp->idx);

		cp->comm_local_base = priv->local_icu[i];
		if (i != TH_1520_MBOX_ICU_KERNEL_CPU0) {
			cp->comm_remote_base = priv->remote_icu[remote_idx];
			remote_idx++;
		}
	}

	spin_lock_init(&priv->mbox_lock);

	priv->mbox.dev = dev;
	priv->mbox.ops = &th1520_mbox_ops;
	priv->mbox.chans = priv->mbox_chans;
	priv->mbox.num_chans = TH_1520_MBOX_CHANS;
	priv->mbox.of_xlate = th1520_mbox_xlate;
	priv->mbox.txdone_irq = true;

	platform_set_drvdata(pdev, priv);

	ret = th1520_mbox_init_generic(priv);
	if (ret) {
		dev_err(dev, "Failed to init mailbox context\n");
		return ret;
	}

	return devm_mbox_controller_register(dev, &priv->mbox);
}

static const struct of_device_id th1520_mbox_dt_ids[] = {
	{ .compatible = "thead,th1520-mbox" },
	{}
};
MODULE_DEVICE_TABLE(of, th1520_mbox_dt_ids);

#ifdef CONFIG_PM_SLEEP
static int __maybe_unused th1520_mbox_suspend_noirq(struct device *dev)
{
	struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
	struct th1520_mbox_context *ctx = priv->ctx;
	u32 i;
	/*
	 * Only the interrupt mask bits are saved and restored; all INFO
	 * data are assumed to be lost across suspend.
	 */
	for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
		ctx->intr_mask[i] =
			ioread32(priv->local_icu[i] + TH_1520_MBOX_MASK);
	}
	return 0;
}

static int __maybe_unused th1520_mbox_resume_noirq(struct device *dev)
{
	struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
	struct th1520_mbox_context *ctx = priv->ctx;
	u32 i;

	for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
		iowrite32(ctx->intr_mask[i],
			  priv->local_icu[i] + TH_1520_MBOX_MASK);
	}

	return 0;
}
#endif

static int __maybe_unused th1520_mbox_runtime_suspend(struct device *dev)
{
	struct th1520_mbox_priv *priv = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);

	return 0;
}

static int __maybe_unused th1520_mbox_runtime_resume(struct device *dev)
{
	struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
	if (ret)
		dev_err(dev, "Failed to enable clocks in runtime resume\n");

	return ret;
}

static const struct dev_pm_ops th1520_mbox_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(th1520_mbox_suspend_noirq,
				      th1520_mbox_resume_noirq)
#endif
	SET_RUNTIME_PM_OPS(th1520_mbox_runtime_suspend,
			   th1520_mbox_runtime_resume, NULL)
};

static struct platform_driver th1520_mbox_driver = {
	.probe		= th1520_mbox_probe,
	.driver = {
		.name	= "th1520-mbox",
		.of_match_table = th1520_mbox_dt_ids,
		.pm = &th1520_mbox_pm_ops,
	},
};
module_platform_driver(th1520_mbox_driver);

MODULE_DESCRIPTION("Thead TH-1520 mailbox IPC driver");
MODULE_LICENSE("GPL");