v6.8 (drivers/dma/mediatek/mtk-uart-apdma.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * MediaTek UART APDMA driver.
  4 *
  5 * Copyright (c) 2019 MediaTek Inc.
  6 * Author: Long Cheng <long.cheng@mediatek.com>
  7 */
  8
  9#include <linux/clk.h>
 10#include <linux/dmaengine.h>
 11#include <linux/dma-mapping.h>
 12#include <linux/err.h>
 13#include <linux/init.h>
 14#include <linux/interrupt.h>
 15#include <linux/iopoll.h>
 16#include <linux/kernel.h>
 17#include <linux/list.h>
 18#include <linux/module.h>
 19#include <linux/of_dma.h>
 20#include <linux/platform_device.h>
 21#include <linux/pm_runtime.h>
 22#include <linux/slab.h>
 23#include <linux/spinlock.h>
 24
 25#include "../virt-dma.h"
 26
 27/* The default number of virtual channels */
 28#define MTK_UART_APDMA_NR_VCHANS	8
 29
 30#define VFF_EN_B		BIT(0)
 31#define VFF_STOP_B		BIT(0)
 32#define VFF_FLUSH_B		BIT(0)
 33#define VFF_4G_EN_B		BIT(0)
 34/* rx valid size >=  vff thre */
 35#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
 36/* tx left size >= vff thre */
 37#define VFF_TX_INT_EN_B		BIT(0)
 38#define VFF_WARM_RST_B		BIT(0)
 39#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
 40#define VFF_TX_INT_CLR_B	0
 41#define VFF_STOP_CLR_B		0
 42#define VFF_EN_CLR_B		0
 43#define VFF_INT_EN_CLR_B	0
 44#define VFF_4G_SUPPORT_CLR_B	0
 45
 46/*
 47 * Interrupt trigger level for TX.
 48 * If the threshold is n, no polling is required to start TX;
 49 * otherwise VFF_FLUSH must be polled.
 50 */
 51#define VFF_TX_THRE(n)		(n)
 52/* interrupt trigger level for rx */
 53#define VFF_RX_THRE(n)		((n) * 3 / 4)
 54
 55#define VFF_RING_SIZE	0xffff
 57/* this bit is inverted each time the ring head wraps around */
 57#define VFF_RING_WRAP	0x10000
 58
 59#define VFF_INT_FLAG		0x00
 60#define VFF_INT_EN		0x04
 61#define VFF_EN			0x08
 62#define VFF_RST			0x0c
 63#define VFF_STOP		0x10
 64#define VFF_FLUSH		0x14
 65#define VFF_ADDR		0x1c
 66#define VFF_LEN			0x24
 67#define VFF_THRE		0x28
 68#define VFF_WPT			0x2c
 69#define VFF_RPT			0x30
 70/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
 71#define VFF_VALID_SIZE		0x3c
 72/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
 73#define VFF_LEFT_SIZE		0x40
 74#define VFF_DEBUG_STATUS	0x50
 75#define VFF_4G_SUPPORT		0x54
 76
 77struct mtk_uart_apdmadev {
 78	struct dma_device ddev;
 79	struct clk *clk;
 80	bool support_33bits;
 81	unsigned int dma_requests;
 82};
 83
 84struct mtk_uart_apdma_desc {
 85	struct virt_dma_desc vd;
 86
 87	dma_addr_t addr;
 88	unsigned int avail_len;
 89};
 90
 91struct mtk_chan {
 92	struct virt_dma_chan vc;
 93	struct dma_slave_config	cfg;
 94	struct mtk_uart_apdma_desc *desc;
 95	enum dma_transfer_direction dir;
 96
 97	void __iomem *base;
 98	unsigned int irq;
 99
100	unsigned int rx_status;
101};
102
103static inline struct mtk_uart_apdmadev *
104to_mtk_uart_apdma_dev(struct dma_device *d)
105{
106	return container_of(d, struct mtk_uart_apdmadev, ddev);
107}
108
109static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
110{
111	return container_of(c, struct mtk_chan, vc.chan);
112}
113
114static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
115	(struct dma_async_tx_descriptor *t)
116{
117	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
118}
119
120static void mtk_uart_apdma_write(struct mtk_chan *c,
121			       unsigned int reg, unsigned int val)
122{
123	writel(val, c->base + reg);
124}
125
126static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
127{
128	return readl(c->base + reg);
129}
130
131static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
132{
133	kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
134}
135
136static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
137{
138	struct mtk_uart_apdmadev *mtkd =
139				to_mtk_uart_apdma_dev(c->vc.chan.device);
140	struct mtk_uart_apdma_desc *d = c->desc;
141	unsigned int wpt, vff_sz;
142
143	vff_sz = c->cfg.dst_port_window_size;
144	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
145		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
146		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
147		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
148		mtk_uart_apdma_write(c, VFF_WPT, 0);
149		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
150
151		if (mtkd->support_33bits)
152			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
153	}
154
155	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
156	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
157		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");
158
159	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
160		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
161		return;
162	}
163
164	wpt = mtk_uart_apdma_read(c, VFF_WPT);
165
166	wpt += c->desc->avail_len;
167	if ((wpt & VFF_RING_SIZE) == vff_sz)
168		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;
169
170	/* Let DMA start moving data */
171	mtk_uart_apdma_write(c, VFF_WPT, wpt);
172
173	/* HW auto set to 0 when left size >= threshold */
174	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
175	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
176		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
177}
178
179static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
180{
181	struct mtk_uart_apdmadev *mtkd =
182				to_mtk_uart_apdma_dev(c->vc.chan.device);
183	struct mtk_uart_apdma_desc *d = c->desc;
184	unsigned int vff_sz;
185
186	vff_sz = c->cfg.src_port_window_size;
187	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
188		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
189		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
190		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
191		mtk_uart_apdma_write(c, VFF_RPT, 0);
192		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
193
194		if (mtkd->support_33bits)
195			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
196	}
197
198	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
199	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
200	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
201		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
202}
203
204static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
205{
206	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
207	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
208	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
209}
210
211static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
212{
213	struct mtk_uart_apdma_desc *d = c->desc;
214	unsigned int len, wg, rg;
215	int cnt;
216
217	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
218
219	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
220		return;
221
222	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
223	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
224
225	len = c->cfg.src_port_window_size;
226	rg = mtk_uart_apdma_read(c, VFF_RPT);
227	wg = mtk_uart_apdma_read(c, VFF_WPT);
228	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);
229
230	/*
231	 * The buffer is a ring buffer. If the wrap bits differ,
232	 * WPT has started the next cycle around the ring.
233	 */
234	if ((rg ^ wg) & VFF_RING_WRAP)
235		cnt += len;
236
237	c->rx_status = d->avail_len - cnt;
238	mtk_uart_apdma_write(c, VFF_RPT, wg);
239}
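The RX and TX paths above share one ring-pointer convention: the low 16 bits of VFF_WPT/VFF_RPT (masked with VFF_RING_SIZE) hold the byte offset into the ring, and the VFF_RING_WRAP bit toggles on every lap, so occupancy can be computed without a separate full/empty flag. Below is a minimal standalone C sketch of that arithmetic; the helpers ring_occupancy() and ring_advance() and the sample values are illustrative assumptions, not part of the driver.

#include <stdio.h>

#define VFF_RING_SIZE	0xffff
#define VFF_RING_WRAP	0x10000

/*
 * Mirrors the count in mtk_uart_apdma_rx_handler(): bytes available between
 * read pointer rg and write pointer wg in a ring of vff_sz bytes.
 */
static int ring_occupancy(unsigned int rg, unsigned int wg, unsigned int vff_sz)
{
	int cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

	/* Differing wrap bits mean the write pointer is one lap ahead. */
	if ((rg ^ wg) & VFF_RING_WRAP)
		cnt += vff_sz;

	return cnt;
}

/*
 * Mirrors the write-pointer update in mtk_uart_apdma_start_tx(): advance by
 * len, and when the offset reaches the ring size, reset it to zero and
 * toggle the wrap bit.
 */
static unsigned int ring_advance(unsigned int wpt, unsigned int len,
				 unsigned int vff_sz)
{
	wpt += len;
	if ((wpt & VFF_RING_SIZE) == vff_sz)
		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

	return wpt;
}

int main(void)
{
	/* 0x100-byte ring: writer at 0x20 on its second lap, reader at 0xf0. */
	printf("%d\n", ring_occupancy(0xf0, VFF_RING_WRAP | 0x20, 0x100));	/* 48 */
	printf("0x%x\n", ring_advance(0xf0, 0x10, 0x100));			/* 0x10000 */
	return 0;
}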
240
241static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
242{
243	struct mtk_uart_apdma_desc *d = c->desc;
244
245	if (d) {
246		list_del(&d->vd.node);
247		vchan_cookie_complete(&d->vd);
248		c->desc = NULL;
249	}
250}
251
252static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
253{
254	struct dma_chan *chan = (struct dma_chan *)dev_id;
255	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
256	unsigned long flags;
257
258	spin_lock_irqsave(&c->vc.lock, flags);
259	if (c->dir == DMA_DEV_TO_MEM)
260		mtk_uart_apdma_rx_handler(c);
261	else if (c->dir == DMA_MEM_TO_DEV)
262		mtk_uart_apdma_tx_handler(c);
263	mtk_uart_apdma_chan_complete_handler(c);
264	spin_unlock_irqrestore(&c->vc.lock, flags);
265
266	return IRQ_HANDLED;
267}
268
269static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
270{
271	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
272	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
273	unsigned int status;
274	int ret;
275
276	ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
277	if (ret < 0) {
278		pm_runtime_put_noidle(chan->device->dev);
279		return ret;
280	}
281
282	mtk_uart_apdma_write(c, VFF_ADDR, 0);
283	mtk_uart_apdma_write(c, VFF_THRE, 0);
284	mtk_uart_apdma_write(c, VFF_LEN, 0);
285	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);
286
287	ret = readx_poll_timeout(readl, c->base + VFF_EN,
288			  status, !status, 10, 100);
289	if (ret)
290		goto err_pm;
291
292	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
293			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
294	if (ret < 0) {
295		dev_err(chan->device->dev, "Can't request dma IRQ\n");
296		ret = -EINVAL;
297		goto err_pm;
298	}
299
300	if (mtkd->support_33bits)
301		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
302
303err_pm:
304	pm_runtime_put_noidle(mtkd->ddev.dev);
305	return ret;
306}
307
308static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
309{
310	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
311	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
312
313	free_irq(c->irq, chan);
314
315	tasklet_kill(&c->vc.task);
316
317	vchan_free_chan_resources(&c->vc);
318
319	pm_runtime_put_sync(mtkd->ddev.dev);
320}
321
322static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
323					 dma_cookie_t cookie,
324					 struct dma_tx_state *txstate)
325{
326	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
327	enum dma_status ret;
328
329	ret = dma_cookie_status(chan, cookie, txstate);
330	if (!txstate)
331		return ret;
332
333	dma_set_residue(txstate, c->rx_status);
334
335	return ret;
336}
337
338/*
339 * dmaengine_prep_slave_single() calls this function, with sglen == 1.
340 * The 8250 UART driver uses one ring buffer and handles a single sg entry.
341 */
342static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
343	(struct dma_chan *chan, struct scatterlist *sgl,
344	unsigned int sglen, enum dma_transfer_direction dir,
345	unsigned long tx_flags, void *context)
346{
347	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
348	struct mtk_uart_apdma_desc *d;
349
350	if (!is_slave_direction(dir) || sglen != 1)
351		return NULL;
352
353	/* Now allocate and setup the descriptor */
354	d = kzalloc(sizeof(*d), GFP_NOWAIT);
355	if (!d)
356		return NULL;
357
358	d->avail_len = sg_dma_len(sgl);
359	d->addr = sg_dma_address(sgl);
360	c->dir = dir;
361
362	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
363}
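The comment above this function describes the client side: a UART driver configures the ring once through dma_slave_config and then submits one buffer per transfer with dmaengine_prep_slave_single(). The sketch below shows that call sequence using only generic dmaengine APIs; uart_start_tx_dma(), uart_tx_done() and the "tx" channel name are illustrative assumptions, not code taken from this driver or from the 8250 client.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/* Hypothetical completion callback, invoked after vchan_cookie_complete(). */
static void uart_tx_done(void *param)
{
}

/* Hypothetical helper; channel release and buffer unmapping omitted. */
static int uart_start_tx_dma(struct device *dev, void *buf, size_t len,
			     size_t ring_sz)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = { };
	struct dma_chan *chan;
	dma_addr_t addr;

	/* "tx" is an assumed dma-names entry in the client's DT node. */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/*
	 * mtk_uart_apdma_slave_config() copies this wholesale;
	 * dst_port_window_size (the client's circular TX buffer size)
	 * later becomes VFF_LEN, the TX ring size.
	 */
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_port_window_size = ring_sz;
	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* Lands in mtk_uart_apdma_prep_slave_sg() with sglen == 1. */
	desc = dmaengine_prep_slave_single(chan, addr, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = uart_tx_done;
	dmaengine_submit(desc);

	/* Kicks mtk_uart_apdma_issue_pending() and then ..._start_tx(). */
	dma_async_issue_pending(chan);
	return 0;
}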
364
365static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
366{
367	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
368	struct virt_dma_desc *vd;
369	unsigned long flags;
370
371	spin_lock_irqsave(&c->vc.lock, flags);
372	if (vchan_issue_pending(&c->vc) && !c->desc) {
373		vd = vchan_next_desc(&c->vc);
374		c->desc = to_mtk_uart_apdma_desc(&vd->tx);
375
376		if (c->dir == DMA_DEV_TO_MEM)
377			mtk_uart_apdma_start_rx(c);
378		else if (c->dir == DMA_MEM_TO_DEV)
379			mtk_uart_apdma_start_tx(c);
380	}
381
382	spin_unlock_irqrestore(&c->vc.lock, flags);
383}
384
385static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
386				   struct dma_slave_config *config)
387{
388	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
389
390	memcpy(&c->cfg, config, sizeof(*config));
391
392	return 0;
393}
394
395static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
396{
397	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
398	unsigned long flags;
399	unsigned int status;
400	LIST_HEAD(head);
401	int ret;
402
403	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
404
405	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
406			  status, status != VFF_FLUSH_B, 10, 100);
407	if (ret)
408		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
409			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));
410
411	/*
412	 * Stopping takes three steps:
413	 * 1. set STOP to 1
414	 * 2. wait for EN to drop to 0
415	 * 3. set STOP back to 0
416	 */
417	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
418	ret = readx_poll_timeout(readl, c->base + VFF_EN,
419			  status, !status, 10, 100);
420	if (ret)
421		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
422			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));
423
424	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
425	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
426
427	if (c->dir == DMA_DEV_TO_MEM)
428		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
429	else if (c->dir == DMA_MEM_TO_DEV)
430		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
431
432	synchronize_irq(c->irq);
433
434	spin_lock_irqsave(&c->vc.lock, flags);
435	vchan_get_all_descriptors(&c->vc, &head);
436	spin_unlock_irqrestore(&c->vc.lock, flags);
437
438	vchan_dma_desc_free_list(&c->vc, &head);
439
440	return 0;
441}
442
443static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
444{
445	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
446	unsigned long flags;
447
448	spin_lock_irqsave(&c->vc.lock, flags);
449
450	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
451	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
452
453	spin_unlock_irqrestore(&c->vc.lock, flags);
454	synchronize_irq(c->irq);
455
456	return 0;
457}
458
459static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
460{
461	while (!list_empty(&mtkd->ddev.channels)) {
462		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
463			struct mtk_chan, vc.chan.device_node);
464
465		list_del(&c->vc.chan.device_node);
466		tasklet_kill(&c->vc.task);
467	}
468}
469
470static const struct of_device_id mtk_uart_apdma_match[] = {
471	{ .compatible = "mediatek,mt6577-uart-dma", },
472	{ /* sentinel */ },
473};
474MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
475
476static int mtk_uart_apdma_probe(struct platform_device *pdev)
477{
478	struct device_node *np = pdev->dev.of_node;
479	struct mtk_uart_apdmadev *mtkd;
480	int bit_mask = 32, rc;
481	struct mtk_chan *c;
482	unsigned int i;
483
484	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
485	if (!mtkd)
486		return -ENOMEM;
487
488	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
489	if (IS_ERR(mtkd->clk)) {
490		dev_err(&pdev->dev, "No clock specified\n");
491		rc = PTR_ERR(mtkd->clk);
492		return rc;
493	}
494
495	if (of_property_read_bool(np, "mediatek,dma-33bits"))
496		mtkd->support_33bits = true;
497
498	if (mtkd->support_33bits)
499		bit_mask = 33;
500
501	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
502	if (rc)
503		return rc;
504
505	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
506	mtkd->ddev.device_alloc_chan_resources =
507				mtk_uart_apdma_alloc_chan_resources;
508	mtkd->ddev.device_free_chan_resources =
509				mtk_uart_apdma_free_chan_resources;
510	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
511	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
512	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
513	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
514	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
515	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
516	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
517	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
518	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
519	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
520	mtkd->ddev.dev = &pdev->dev;
521	INIT_LIST_HEAD(&mtkd->ddev.channels);
522
523	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
524	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
525		dev_info(&pdev->dev,
526			 "Using %u as missing dma-requests property\n",
527			 MTK_UART_APDMA_NR_VCHANS);
528	}
529
530	for (i = 0; i < mtkd->dma_requests; i++) {
531		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
532		if (!c) {
533			rc = -ENODEV;
534			goto err_no_dma;
535		}
536
537		c->base = devm_platform_ioremap_resource(pdev, i);
538		if (IS_ERR(c->base)) {
539			rc = PTR_ERR(c->base);
540			goto err_no_dma;
541		}
542		c->vc.desc_free = mtk_uart_apdma_desc_free;
543		vchan_init(&c->vc, &mtkd->ddev);
544
545		rc = platform_get_irq(pdev, i);
546		if (rc < 0)
547			goto err_no_dma;
548		c->irq = rc;
549	}
550
551	pm_runtime_enable(&pdev->dev);
552
553	rc = dma_async_device_register(&mtkd->ddev);
554	if (rc)
555		goto rpm_disable;
556
557	platform_set_drvdata(pdev, mtkd);
558
559	/* Device-tree DMA controller registration */
560	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
561	if (rc)
562		goto dma_remove;
563
564	return rc;
565
566dma_remove:
567	dma_async_device_unregister(&mtkd->ddev);
568rpm_disable:
569	pm_runtime_disable(&pdev->dev);
570err_no_dma:
571	mtk_uart_apdma_free(mtkd);
572	return rc;
573}
574
575static void mtk_uart_apdma_remove(struct platform_device *pdev)
576{
577	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
578
579	of_dma_controller_free(pdev->dev.of_node);
580
581	mtk_uart_apdma_free(mtkd);
582
583	dma_async_device_unregister(&mtkd->ddev);
584
585	pm_runtime_disable(&pdev->dev);
586}
587
588#ifdef CONFIG_PM_SLEEP
589static int mtk_uart_apdma_suspend(struct device *dev)
590{
591	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
592
593	if (!pm_runtime_suspended(dev))
594		clk_disable_unprepare(mtkd->clk);
595
596	return 0;
597}
598
599static int mtk_uart_apdma_resume(struct device *dev)
600{
601	int ret;
602	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
603
604	if (!pm_runtime_suspended(dev)) {
605		ret = clk_prepare_enable(mtkd->clk);
606		if (ret)
607			return ret;
608	}
609
610	return 0;
611}
612#endif /* CONFIG_PM_SLEEP */
613
614#ifdef CONFIG_PM
615static int mtk_uart_apdma_runtime_suspend(struct device *dev)
616{
617	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
618
619	clk_disable_unprepare(mtkd->clk);
620
621	return 0;
622}
623
624static int mtk_uart_apdma_runtime_resume(struct device *dev)
625{
626	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
627
628	return clk_prepare_enable(mtkd->clk);
629}
630#endif /* CONFIG_PM */
631
632static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
633	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
634	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
635			   mtk_uart_apdma_runtime_resume, NULL)
636};
637
638static struct platform_driver mtk_uart_apdma_driver = {
639	.probe	= mtk_uart_apdma_probe,
640	.remove_new = mtk_uart_apdma_remove,
641	.driver = {
642		.name		= KBUILD_MODNAME,
643		.pm		= &mtk_uart_apdma_pm_ops,
644		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
645	},
646};
647
648module_platform_driver(mtk_uart_apdma_driver);
649
650MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
651MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
652MODULE_LICENSE("GPL v2");
v5.4 (drivers/dma/mediatek/mtk-uart-apdma.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * MediaTek UART APDMA driver.
  4 *
  5 * Copyright (c) 2019 MediaTek Inc.
  6 * Author: Long Cheng <long.cheng@mediatek.com>
  7 */
  8
  9#include <linux/clk.h>
 10#include <linux/dmaengine.h>
 11#include <linux/dma-mapping.h>
 12#include <linux/err.h>
 13#include <linux/init.h>
 14#include <linux/interrupt.h>
 15#include <linux/iopoll.h>
 16#include <linux/kernel.h>
 17#include <linux/list.h>
 18#include <linux/module.h>
 19#include <linux/of_device.h>
 20#include <linux/of_dma.h>
 21#include <linux/platform_device.h>
 22#include <linux/pm_runtime.h>
 23#include <linux/slab.h>
 24#include <linux/spinlock.h>
 25
 26#include "../virt-dma.h"
 27
 29/* The default number of virtual channels */
 29#define MTK_UART_APDMA_NR_VCHANS	8
 30
 31#define VFF_EN_B		BIT(0)
 32#define VFF_STOP_B		BIT(0)
 33#define VFF_FLUSH_B		BIT(0)
 34#define VFF_4G_EN_B		BIT(0)
 35/* rx valid size >=  vff thre */
 36#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
 37/* tx left size >= vff thre */
 38#define VFF_TX_INT_EN_B		BIT(0)
 39#define VFF_WARM_RST_B		BIT(0)
 40#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
 41#define VFF_TX_INT_CLR_B	0
 42#define VFF_STOP_CLR_B		0
 43#define VFF_EN_CLR_B		0
 44#define VFF_INT_EN_CLR_B	0
 45#define VFF_4G_SUPPORT_CLR_B	0
 46
 47/*
 48 * Interrupt trigger level for TX.
 49 * If the threshold is n, no polling is required to start TX;
 50 * otherwise VFF_FLUSH must be polled.
 51 */
 52#define VFF_TX_THRE(n)		(n)
 53/* interrupt trigger level for rx */
 54#define VFF_RX_THRE(n)		((n) * 3 / 4)
 55
 56#define VFF_RING_SIZE	0xffff
 58/* this bit is inverted each time the ring head wraps around */
 58#define VFF_RING_WRAP	0x10000
 59
 60#define VFF_INT_FLAG		0x00
 61#define VFF_INT_EN		0x04
 62#define VFF_EN			0x08
 63#define VFF_RST			0x0c
 64#define VFF_STOP		0x10
 65#define VFF_FLUSH		0x14
 66#define VFF_ADDR		0x1c
 67#define VFF_LEN			0x24
 68#define VFF_THRE		0x28
 69#define VFF_WPT			0x2c
 70#define VFF_RPT			0x30
 71/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
 72#define VFF_VALID_SIZE		0x3c
 73/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
 74#define VFF_LEFT_SIZE		0x40
 75#define VFF_DEBUG_STATUS	0x50
 76#define VFF_4G_SUPPORT		0x54
 77
 78struct mtk_uart_apdmadev {
 79	struct dma_device ddev;
 80	struct clk *clk;
 81	bool support_33bits;
 82	unsigned int dma_requests;
 83};
 84
 85struct mtk_uart_apdma_desc {
 86	struct virt_dma_desc vd;
 87
 88	dma_addr_t addr;
 89	unsigned int avail_len;
 90};
 91
 92struct mtk_chan {
 93	struct virt_dma_chan vc;
 94	struct dma_slave_config	cfg;
 95	struct mtk_uart_apdma_desc *desc;
 96	enum dma_transfer_direction dir;
 97
 98	void __iomem *base;
 99	unsigned int irq;
100
101	unsigned int rx_status;
102};
103
104static inline struct mtk_uart_apdmadev *
105to_mtk_uart_apdma_dev(struct dma_device *d)
106{
107	return container_of(d, struct mtk_uart_apdmadev, ddev);
108}
109
110static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
111{
112	return container_of(c, struct mtk_chan, vc.chan);
113}
114
115static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
116	(struct dma_async_tx_descriptor *t)
117{
118	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
119}
120
121static void mtk_uart_apdma_write(struct mtk_chan *c,
122			       unsigned int reg, unsigned int val)
123{
124	writel(val, c->base + reg);
125}
126
127static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
128{
129	return readl(c->base + reg);
130}
131
132static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
133{
134	struct dma_chan *chan = vd->tx.chan;
135	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
136
137	kfree(c->desc);
138}
139
140static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
141{
142	struct mtk_uart_apdmadev *mtkd =
143				to_mtk_uart_apdma_dev(c->vc.chan.device);
144	struct mtk_uart_apdma_desc *d = c->desc;
145	unsigned int wpt, vff_sz;
146
147	vff_sz = c->cfg.dst_port_window_size;
148	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
149		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
150		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
151		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
152		mtk_uart_apdma_write(c, VFF_WPT, 0);
153		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
154
155		if (mtkd->support_33bits)
156			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
157	}
158
159	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
160	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
161		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");
162
163	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
164		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
165		return;
166	}
167
168	wpt = mtk_uart_apdma_read(c, VFF_WPT);
169
170	wpt += c->desc->avail_len;
171	if ((wpt & VFF_RING_SIZE) == vff_sz)
172		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;
173
174	/* Let DMA start moving data */
175	mtk_uart_apdma_write(c, VFF_WPT, wpt);
176
177	/* HW auto set to 0 when left size >= threshold */
178	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
179	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
180		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
181}
182
183static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
184{
185	struct mtk_uart_apdmadev *mtkd =
186				to_mtk_uart_apdma_dev(c->vc.chan.device);
187	struct mtk_uart_apdma_desc *d = c->desc;
188	unsigned int vff_sz;
189
190	vff_sz = c->cfg.src_port_window_size;
191	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
192		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
193		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
194		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
195		mtk_uart_apdma_write(c, VFF_RPT, 0);
196		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
197
198		if (mtkd->support_33bits)
199			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
200	}
201
202	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
203	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
204	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
205		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
206}
207
208static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
209{
210	struct mtk_uart_apdma_desc *d = c->desc;
211
212	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
213	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
214	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
215
216	list_del(&d->vd.node);
217	vchan_cookie_complete(&d->vd);
218}
219
220static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
221{
222	struct mtk_uart_apdma_desc *d = c->desc;
223	unsigned int len, wg, rg;
224	int cnt;
225
226	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
227
228	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
229		return;
230
231	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
232	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
233
234	len = c->cfg.src_port_window_size;
235	rg = mtk_uart_apdma_read(c, VFF_RPT);
236	wg = mtk_uart_apdma_read(c, VFF_WPT);
237	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);
238
239	/*
240	 * The buffer is a ring buffer. If the wrap bits differ,
241	 * WPT has started the next cycle around the ring.
242	 */
243	if ((rg ^ wg) & VFF_RING_WRAP)
244		cnt += len;
245
246	c->rx_status = d->avail_len - cnt;
247	mtk_uart_apdma_write(c, VFF_RPT, wg);
248
249	list_del(&d->vd.node);
250	vchan_cookie_complete(&d->vd);
251}
252
253static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
254{
255	struct dma_chan *chan = (struct dma_chan *)dev_id;
256	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
257	unsigned long flags;
258
259	spin_lock_irqsave(&c->vc.lock, flags);
260	if (c->dir == DMA_DEV_TO_MEM)
261		mtk_uart_apdma_rx_handler(c);
262	else if (c->dir == DMA_MEM_TO_DEV)
263		mtk_uart_apdma_tx_handler(c);
264	spin_unlock_irqrestore(&c->vc.lock, flags);
265
266	return IRQ_HANDLED;
267}
268
269static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
270{
271	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
272	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
273	unsigned int status;
274	int ret;
275
276	ret = pm_runtime_get_sync(mtkd->ddev.dev);
277	if (ret < 0) {
278		pm_runtime_put_noidle(chan->device->dev);
279		return ret;
280	}
281
282	mtk_uart_apdma_write(c, VFF_ADDR, 0);
283	mtk_uart_apdma_write(c, VFF_THRE, 0);
284	mtk_uart_apdma_write(c, VFF_LEN, 0);
285	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);
286
287	ret = readx_poll_timeout(readl, c->base + VFF_EN,
288			  status, !status, 10, 100);
289	if (ret)
290		return ret;
291
292	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
293			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
294	if (ret < 0) {
295		dev_err(chan->device->dev, "Can't request dma IRQ\n");
296		return -EINVAL;
297	}
298
299	if (mtkd->support_33bits)
300		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
301
302	return ret;
303}
304
305static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
306{
307	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
308	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
309
310	free_irq(c->irq, chan);
311
312	tasklet_kill(&c->vc.task);
313
314	vchan_free_chan_resources(&c->vc);
315
316	pm_runtime_put_sync(mtkd->ddev.dev);
317}
318
319static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
320					 dma_cookie_t cookie,
321					 struct dma_tx_state *txstate)
322{
323	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
324	enum dma_status ret;
325
326	ret = dma_cookie_status(chan, cookie, txstate);
327	if (!txstate)
328		return ret;
329
330	dma_set_residue(txstate, c->rx_status);
331
332	return ret;
333}
334
335/*
336 * dmaengine_prep_slave_single() calls this function, with sglen == 1.
337 * The 8250 UART driver uses one ring buffer and handles a single sg entry.
338 */
339static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
340	(struct dma_chan *chan, struct scatterlist *sgl,
341	unsigned int sglen, enum dma_transfer_direction dir,
342	unsigned long tx_flags, void *context)
343{
344	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
345	struct mtk_uart_apdma_desc *d;
346
347	if (!is_slave_direction(dir) || sglen != 1)
348		return NULL;
349
350	/* Now allocate and setup the descriptor */
351	d = kzalloc(sizeof(*d), GFP_ATOMIC);
352	if (!d)
353		return NULL;
354
355	d->avail_len = sg_dma_len(sgl);
356	d->addr = sg_dma_address(sgl);
357	c->dir = dir;
358
359	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
360}
361
362static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
363{
364	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
365	struct virt_dma_desc *vd;
366	unsigned long flags;
367
368	spin_lock_irqsave(&c->vc.lock, flags);
369	if (vchan_issue_pending(&c->vc)) {
370		vd = vchan_next_desc(&c->vc);
371		c->desc = to_mtk_uart_apdma_desc(&vd->tx);
372
373		if (c->dir == DMA_DEV_TO_MEM)
374			mtk_uart_apdma_start_rx(c);
375		else if (c->dir == DMA_MEM_TO_DEV)
376			mtk_uart_apdma_start_tx(c);
377	}
378
379	spin_unlock_irqrestore(&c->vc.lock, flags);
380}
381
382static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
383				   struct dma_slave_config *config)
384{
385	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
386
387	memcpy(&c->cfg, config, sizeof(*config));
388
389	return 0;
390}
391
392static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
393{
394	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
395	unsigned long flags;
396	unsigned int status;
397	LIST_HEAD(head);
398	int ret;
399
400	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
401
402	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
403			  status, status != VFF_FLUSH_B, 10, 100);
404	if (ret)
405		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
406			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));
407
408	/*
409	 * Stopping takes three steps:
410	 * 1. set STOP to 1
411	 * 2. wait for EN to drop to 0
412	 * 3. set STOP back to 0
413	 */
414	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
415	ret = readx_poll_timeout(readl, c->base + VFF_EN,
416			  status, !status, 10, 100);
417	if (ret)
418		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
419			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));
420
421	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
422	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
423
424	if (c->dir == DMA_DEV_TO_MEM)
425		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
426	else if (c->dir == DMA_MEM_TO_DEV)
427		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
428
429	synchronize_irq(c->irq);
430
431	spin_lock_irqsave(&c->vc.lock, flags);
432	vchan_get_all_descriptors(&c->vc, &head);
433	vchan_dma_desc_free_list(&c->vc, &head);
434	spin_unlock_irqrestore(&c->vc.lock, flags);
435
436	return 0;
437}
438
439static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
440{
441	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
442	unsigned long flags;
443
444	spin_lock_irqsave(&c->vc.lock, flags);
445
446	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
447	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
448
449	synchronize_irq(c->irq);
450
451	spin_unlock_irqrestore(&c->vc.lock, flags);
452
453	return 0;
454}
455
456static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
457{
458	while (!list_empty(&mtkd->ddev.channels)) {
459		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
460			struct mtk_chan, vc.chan.device_node);
461
462		list_del(&c->vc.chan.device_node);
463		tasklet_kill(&c->vc.task);
464	}
465}
466
467static const struct of_device_id mtk_uart_apdma_match[] = {
468	{ .compatible = "mediatek,mt6577-uart-dma", },
469	{ /* sentinel */ },
470};
471MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
472
473static int mtk_uart_apdma_probe(struct platform_device *pdev)
474{
475	struct device_node *np = pdev->dev.of_node;
476	struct mtk_uart_apdmadev *mtkd;
477	int bit_mask = 32, rc;
478	struct resource *res;
479	struct mtk_chan *c;
480	unsigned int i;
481
482	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
483	if (!mtkd)
484		return -ENOMEM;
485
486	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
487	if (IS_ERR(mtkd->clk)) {
488		dev_err(&pdev->dev, "No clock specified\n");
489		rc = PTR_ERR(mtkd->clk);
490		return rc;
491	}
492
493	if (of_property_read_bool(np, "mediatek,dma-33bits"))
494		mtkd->support_33bits = true;
495
496	if (mtkd->support_33bits)
497		bit_mask = 33;
498
499	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
500	if (rc)
501		return rc;
502
503	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
504	mtkd->ddev.device_alloc_chan_resources =
505				mtk_uart_apdma_alloc_chan_resources;
506	mtkd->ddev.device_free_chan_resources =
507				mtk_uart_apdma_free_chan_resources;
508	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
509	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
510	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
511	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
512	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
513	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
514	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
515	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
516	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
517	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
518	mtkd->ddev.dev = &pdev->dev;
519	INIT_LIST_HEAD(&mtkd->ddev.channels);
520
521	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
522	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
523		dev_info(&pdev->dev,
524			 "Using %u as missing dma-requests property\n",
525			 MTK_UART_APDMA_NR_VCHANS);
526	}
527
528	for (i = 0; i < mtkd->dma_requests; i++) {
529		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
530		if (!c) {
531			rc = -ENODEV;
532			goto err_no_dma;
533		}
534
535		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
536		if (!res) {
537			rc = -ENODEV;
538			goto err_no_dma;
539		}
540
541		c->base = devm_ioremap_resource(&pdev->dev, res);
542		if (IS_ERR(c->base)) {
543			rc = PTR_ERR(c->base);
544			goto err_no_dma;
545		}
546		c->vc.desc_free = mtk_uart_apdma_desc_free;
547		vchan_init(&c->vc, &mtkd->ddev);
548
549		rc = platform_get_irq(pdev, i);
550		if (rc < 0)
551			goto err_no_dma;
552		c->irq = rc;
553	}
554
555	pm_runtime_enable(&pdev->dev);
556	pm_runtime_set_active(&pdev->dev);
557
558	rc = dma_async_device_register(&mtkd->ddev);
559	if (rc)
560		goto rpm_disable;
561
562	platform_set_drvdata(pdev, mtkd);
563
564	/* Device-tree DMA controller registration */
565	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
566	if (rc)
567		goto dma_remove;
568
569	return rc;
570
571dma_remove:
572	dma_async_device_unregister(&mtkd->ddev);
573rpm_disable:
574	pm_runtime_disable(&pdev->dev);
575err_no_dma:
576	mtk_uart_apdma_free(mtkd);
577	return rc;
578}
579
580static int mtk_uart_apdma_remove(struct platform_device *pdev)
581{
582	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
583
584	of_dma_controller_free(pdev->dev.of_node);
585
586	mtk_uart_apdma_free(mtkd);
587
588	dma_async_device_unregister(&mtkd->ddev);
589
590	pm_runtime_disable(&pdev->dev);
591
592	return 0;
593}
594
595#ifdef CONFIG_PM_SLEEP
596static int mtk_uart_apdma_suspend(struct device *dev)
597{
598	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
599
600	if (!pm_runtime_suspended(dev))
601		clk_disable_unprepare(mtkd->clk);
602
603	return 0;
604}
605
606static int mtk_uart_apdma_resume(struct device *dev)
607{
608	int ret;
609	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
610
611	if (!pm_runtime_suspended(dev)) {
612		ret = clk_prepare_enable(mtkd->clk);
613		if (ret)
614			return ret;
615	}
616
617	return 0;
618}
619#endif /* CONFIG_PM_SLEEP */
620
621#ifdef CONFIG_PM
622static int mtk_uart_apdma_runtime_suspend(struct device *dev)
623{
624	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
625
626	clk_disable_unprepare(mtkd->clk);
627
628	return 0;
629}
630
631static int mtk_uart_apdma_runtime_resume(struct device *dev)
632{
633	int ret;
634	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
635
636	ret = clk_prepare_enable(mtkd->clk);
637	if (ret)
638		return ret;
639
640	return 0;
641}
642#endif /* CONFIG_PM */
643
644static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
645	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
646	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
647			   mtk_uart_apdma_runtime_resume, NULL)
648};
649
650static struct platform_driver mtk_uart_apdma_driver = {
651	.probe	= mtk_uart_apdma_probe,
652	.remove	= mtk_uart_apdma_remove,
653	.driver = {
654		.name		= KBUILD_MODNAME,
655		.pm		= &mtk_uart_apdma_pm_ops,
656		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
657	},
658};
659
660module_platform_driver(mtk_uart_apdma_driver);
661
662MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
663MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
664MODULE_LICENSE("GPL v2");