v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Core driver for the High Speed UART DMA
  4 *
  5 * Copyright (C) 2015 Intel Corporation
  6 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  7 *
  8 * Partially based on the bits found in drivers/tty/serial/mfd.c.
  9 */
 10
 11/*
 12 * DMA channel allocation:
 13 * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA
 14 *    Write (UART RX).
 15 * 2. 0/1 channel are assigned to port 0, 2/3 chan to port 1, 4/5 chan to
 16 *    port 3, and so on.
 17 */
 18
 19#include <linux/delay.h>
 20#include <linux/dmaengine.h>
 21#include <linux/dma-mapping.h>
 22#include <linux/init.h>
 23#include <linux/module.h>
 24#include <linux/slab.h>
 25
 26#include "hsu.h"
 27
 28#define HSU_DMA_BUSWIDTHS				\
 29	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	|	\
 30	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
 31	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
 32	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)		|	\
 33	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		|	\
 34	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		|	\
 35	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)
 36
 37static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
 38{
 39	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
 40}
 41
 42static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
 43{
 44	u32 cr = HSU_CH_CR_CHA;
 45
 46	if (hsuc->direction == DMA_MEM_TO_DEV)
 47		cr &= ~HSU_CH_CR_CHD;
 48	else if (hsuc->direction == DMA_DEV_TO_MEM)
 49		cr |= HSU_CH_CR_CHD;
 50
 51	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
 52}
 53
 54static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
 55{
 56	struct dma_slave_config *config = &hsuc->config;
 57	struct hsu_dma_desc *desc = hsuc->desc;
 58	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
 59	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
 60	unsigned int i, count;
 61
 62	if (hsuc->direction == DMA_MEM_TO_DEV) {
 63		bsr = config->dst_maxburst;
 64		mtsr = config->dst_addr_width;
 65	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
 66		bsr = config->src_maxburst;
 67		mtsr = config->src_addr_width;
 68	}
 69
 70	hsu_chan_disable(hsuc);
 71
 72	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
 73	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
 74	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);
 75
 76	/* Set descriptors */
 77	count = desc->nents - desc->active;
 78	for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
 79		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
 80		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);
 81
 82		/* Prepare value for DCR */
 83		dcr |= HSU_CH_DCR_DESCA(i);
 84		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */
 85
 86		desc->active++;
 87	}
 88	/* Only for the last descriptor in the chain */
 89	dcr |= HSU_CH_DCR_CHSOD(count - 1);
 90	dcr |= HSU_CH_DCR_CHDI(count - 1);
 91
 92	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);
 93
 94	hsu_chan_enable(hsuc);
 95}
 96
 97static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
 98{
 99	hsu_chan_disable(hsuc);
100	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
101}
102
103static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
104{
105	hsu_dma_chan_start(hsuc);
106}
107
108static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
109{
110	struct virt_dma_desc *vdesc;
111
112	/* Get the next descriptor */
113	vdesc = vchan_next_desc(&hsuc->vchan);
114	if (!vdesc) {
115		hsuc->desc = NULL;
116		return;
117	}
118
119	list_del(&vdesc->node);
120	hsuc->desc = to_hsu_dma_desc(vdesc);
121
122	/* Start the channel with a new descriptor */
123	hsu_dma_start_channel(hsuc);
124}
125
126/*
127 *      hsu_dma_get_status() - get DMA channel status
128 *      @chip: HSUART DMA chip
129 *      @nr: DMA channel number
130 *      @status: pointer for DMA Channel Status Register value
131 *
132 *      Description:
133 *      The function reads and clears the DMA Channel Status Register, checks
134 *      if it was a timeout interrupt and returns a corresponding value.
135 *
136 *      Caller should provide a valid pointer for the DMA Channel Status
137 *      Register value that will be returned in @status.
138 *
139 *      Return:
140 *      1 for DMA timeout status, 0 for other DMA status, or error code for
141 *      invalid parameters or no interrupt pending.
142 */
143int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
144		       u32 *status)
145{
146	struct hsu_dma_chan *hsuc;
147	unsigned long flags;
148	u32 sr;
149
150	/* Sanity check */
151	if (nr >= chip->hsu->nr_channels)
152		return -EINVAL;
153
154	hsuc = &chip->hsu->chan[nr];
155
156	/*
157	 * No matter what situation, need read clear the IRQ status
158	 * There is a bug, see Errata 5, HSD 2900918
159	 */
160	spin_lock_irqsave(&hsuc->vchan.lock, flags);
161	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
162	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
163
164	/* Check if any interrupt is pending */
165	sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
166	if (!sr)
167		return -EIO;
168
169	/* Timeout IRQ, need wait some time, see Errata 2 */
170	if (sr & HSU_CH_SR_DESCTO_ANY)
171		udelay(2);
172
173	/*
174	 * At this point, at least one of Descriptor Time Out, Channel Error
175	 * or Descriptor Done bits must be set. Clear the Descriptor Time Out
176	 * bits and if sr is still non-zero, it must be channel error or
177	 * descriptor done which are higher priority than timeout and handled
178	 * in hsu_dma_do_irq(). Else, it must be a timeout.
179	 */
180	sr &= ~HSU_CH_SR_DESCTO_ANY;
181
182	*status = sr;
183
184	return sr ? 0 : 1;
185}
186EXPORT_SYMBOL_GPL(hsu_dma_get_status);
187
188/*
189 *      hsu_dma_do_irq() - DMA interrupt handler
190 *      @chip: HSUART DMA chip
191 *      @nr: DMA channel number
192 *      @status: Channel Status Register value
193 *
194 *      Description:
195 *      This function handles Channel Error and Descriptor Done interrupts.
196 *      This function should be called after determining that the DMA interrupt
197 *      is not a normal timeout interrupt, ie. hsu_dma_get_status() returned 0.
198 *
199 *      Return:
200 *      0 for invalid channel number, 1 otherwise.
201 */
202int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
203{
204	struct hsu_dma_chan *hsuc;
205	struct hsu_dma_desc *desc;
206	unsigned long flags;
207
208	/* Sanity check */
209	if (nr >= chip->hsu->nr_channels)
210		return 0;
211
212	hsuc = &chip->hsu->chan[nr];
213
214	spin_lock_irqsave(&hsuc->vchan.lock, flags);
215	desc = hsuc->desc;
216	if (desc) {
217		if (status & HSU_CH_SR_CHE) {
218			desc->status = DMA_ERROR;
219		} else if (desc->active < desc->nents) {
220			hsu_dma_start_channel(hsuc);
221		} else {
222			vchan_cookie_complete(&desc->vdesc);
223			desc->status = DMA_COMPLETE;
224			hsu_dma_start_transfer(hsuc);
225		}
226	}
227	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
228
229	return 1;
230}
231EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
232
233static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
234{
235	struct hsu_dma_desc *desc;
236
237	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
238	if (!desc)
239		return NULL;
240
241	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
242	if (!desc->sg) {
243		kfree(desc);
244		return NULL;
245	}
246
247	return desc;
248}
249
250static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
251{
252	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);
253
254	kfree(desc->sg);
255	kfree(desc);
256}
257
258static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
259		struct dma_chan *chan, struct scatterlist *sgl,
260		unsigned int sg_len, enum dma_transfer_direction direction,
261		unsigned long flags, void *context)
262{
263	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
264	struct hsu_dma_desc *desc;
265	struct scatterlist *sg;
266	unsigned int i;
267
268	desc = hsu_dma_alloc_desc(sg_len);
269	if (!desc)
270		return NULL;
271
272	for_each_sg(sgl, sg, sg_len, i) {
273		desc->sg[i].addr = sg_dma_address(sg);
274		desc->sg[i].len = sg_dma_len(sg);
275
276		desc->length += sg_dma_len(sg);
277	}
278
279	desc->nents = sg_len;
280	desc->direction = direction;
281	/* desc->active = 0 by kzalloc */
282	desc->status = DMA_IN_PROGRESS;
283
284	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
285}
286
287static void hsu_dma_issue_pending(struct dma_chan *chan)
288{
289	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
290	unsigned long flags;
291
292	spin_lock_irqsave(&hsuc->vchan.lock, flags);
293	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
294		hsu_dma_start_transfer(hsuc);
295	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
296}
297
298static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
299{
300	struct hsu_dma_desc *desc = hsuc->desc;
301	size_t bytes = 0;
302	int i;
303
304	for (i = desc->active; i < desc->nents; i++)
305		bytes += desc->sg[i].len;
306
307	i = HSU_DMA_CHAN_NR_DESC - 1;
308	do {
309		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
310	} while (--i >= 0);
311
312	return bytes;
313}
314
315static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
316	dma_cookie_t cookie, struct dma_tx_state *state)
317{
318	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
319	struct virt_dma_desc *vdesc;
320	enum dma_status status;
321	size_t bytes;
322	unsigned long flags;
323
324	status = dma_cookie_status(chan, cookie, state);
325	if (status == DMA_COMPLETE)
326		return status;
327
328	spin_lock_irqsave(&hsuc->vchan.lock, flags);
329	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
330	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
331		bytes = hsu_dma_active_desc_size(hsuc);
332		dma_set_residue(state, bytes);
333		status = hsuc->desc->status;
334	} else if (vdesc) {
335		bytes = to_hsu_dma_desc(vdesc)->length;
336		dma_set_residue(state, bytes);
337	}
338	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
339
340	return status;
341}
342
343static int hsu_dma_slave_config(struct dma_chan *chan,
344				struct dma_slave_config *config)
345{
346	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
347
348	memcpy(&hsuc->config, config, sizeof(hsuc->config));
349
350	return 0;
351}
352
353static int hsu_dma_pause(struct dma_chan *chan)
354{
355	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
356	unsigned long flags;
357
358	spin_lock_irqsave(&hsuc->vchan.lock, flags);
359	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
360		hsu_chan_disable(hsuc);
361		hsuc->desc->status = DMA_PAUSED;
362	}
363	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
364
365	return 0;
366}
367
368static int hsu_dma_resume(struct dma_chan *chan)
369{
370	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
371	unsigned long flags;
372
373	spin_lock_irqsave(&hsuc->vchan.lock, flags);
374	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
375		hsuc->desc->status = DMA_IN_PROGRESS;
376		hsu_chan_enable(hsuc);
377	}
378	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
379
380	return 0;
381}
382
383static int hsu_dma_terminate_all(struct dma_chan *chan)
384{
385	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
386	unsigned long flags;
387	LIST_HEAD(head);
388
389	spin_lock_irqsave(&hsuc->vchan.lock, flags);
390
391	hsu_dma_stop_channel(hsuc);
392	if (hsuc->desc) {
393		hsu_dma_desc_free(&hsuc->desc->vdesc);
394		hsuc->desc = NULL;
395	}
396
397	vchan_get_all_descriptors(&hsuc->vchan, &head);
398	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
399	vchan_dma_desc_free_list(&hsuc->vchan, &head);
400
401	return 0;
402}
403
404static void hsu_dma_free_chan_resources(struct dma_chan *chan)
405{
406	vchan_free_chan_resources(to_virt_chan(chan));
407}
408
409static void hsu_dma_synchronize(struct dma_chan *chan)
410{
411	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
412
413	vchan_synchronize(&hsuc->vchan);
414}
415
416int hsu_dma_probe(struct hsu_dma_chip *chip)
417{
418	struct hsu_dma *hsu;
419	void __iomem *addr = chip->regs + chip->offset;
420	unsigned short i;
421	int ret;
422
423	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
424	if (!hsu)
425		return -ENOMEM;
426
427	chip->hsu = hsu;
428
429	/* Calculate nr_channels from the IO space length */
430	hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;
431
432	hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
433				 sizeof(*hsu->chan), GFP_KERNEL);
434	if (!hsu->chan)
435		return -ENOMEM;
436
437	INIT_LIST_HEAD(&hsu->dma.channels);
438	for (i = 0; i < hsu->nr_channels; i++) {
439		struct hsu_dma_chan *hsuc = &hsu->chan[i];
440
441		hsuc->vchan.desc_free = hsu_dma_desc_free;
442		vchan_init(&hsuc->vchan, &hsu->dma);
443
444		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
445		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
446	}
447
448	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
449	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);
450
451	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;
452
453	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;
454
455	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
456	hsu->dma.device_tx_status = hsu_dma_tx_status;
457
458	hsu->dma.device_config = hsu_dma_slave_config;
459	hsu->dma.device_pause = hsu_dma_pause;
460	hsu->dma.device_resume = hsu_dma_resume;
461	hsu->dma.device_terminate_all = hsu_dma_terminate_all;
462	hsu->dma.device_synchronize = hsu_dma_synchronize;
463
464	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
465	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
466	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
467	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
468
469	hsu->dma.dev = chip->dev;
470
471	dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);
472
473	ret = dma_async_device_register(&hsu->dma);
474	if (ret)
475		return ret;
476
477	dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
478	return 0;
479}
480EXPORT_SYMBOL_GPL(hsu_dma_probe);
481
482int hsu_dma_remove(struct hsu_dma_chip *chip)
483{
484	struct hsu_dma *hsu = chip->hsu;
485	unsigned short i;
486
487	dma_async_device_unregister(&hsu->dma);
488
489	for (i = 0; i < hsu->nr_channels; i++) {
490		struct hsu_dma_chan *hsuc = &hsu->chan[i];
491
492		tasklet_kill(&hsuc->vchan.task);
493	}
494
495	return 0;
496}
497EXPORT_SYMBOL_GPL(hsu_dma_remove);
498
499MODULE_LICENSE("GPL v2");
500MODULE_DESCRIPTION("High Speed UART DMA core driver");
501MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
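
For orientation, below is a minimal, hypothetical sketch (not part of hsu.c) of how a serial driver's interrupt handler could consume the two helpers exported above, relying only on the return-value contract documented in their comments: a positive return from hsu_dma_get_status() means a timeout, zero means the status should be handed to hsu_dma_do_irq(), and a negative value means there is nothing to handle. The handler name, the channel-number parameter and the "flush RX buffer" step are placeholders, not taken from any real caller.

	#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED, IRQ_NONE */
	#include "hsu.h"		/* hsu_dma_get_status(), hsu_dma_do_irq() */

	/* Hypothetical glue code; only the two hsu_dma_*() calls are real API. */
	static irqreturn_t my_uart_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
	{
		u32 status;
		int ret;

		ret = hsu_dma_get_status(chip, nr, &status);
		if (ret < 0)
			return IRQ_NONE;	/* invalid channel or no interrupt pending */
		if (ret > 0) {
			/* Timeout: a real driver would flush the partly filled RX buffer here. */
			return IRQ_HANDLED;
		}

		/* Channel error or descriptor done: let the core driver process it. */
		return hsu_dma_do_irq(chip, nr, status) ? IRQ_HANDLED : IRQ_NONE;
	}

This mirrors the split documented above: timeout handling stays in the UART driver, while channel errors and descriptor completion are processed by hsu_dma_do_irq().
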
v4.6
 
  1/*
  2 * Core driver for the High Speed UART DMA
  3 *
  4 * Copyright (C) 2015 Intel Corporation
  5 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  6 *
  7 * Partially based on the bits found in drivers/tty/serial/mfd.c.
  8 *
  9 * This program is free software; you can redistribute it and/or modify
 10 * it under the terms of the GNU General Public License version 2 as
 11 * published by the Free Software Foundation.
 12 */
 13
 14/*
 15 * DMA channel allocation:
 16 * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA
 17 *    Write (UART RX).
 18 * 2. 0/1 channel are assigned to port 0, 2/3 chan to port 1, 4/5 chan to
 19 *    port 3, and so on.
 20 */
 21
 22#include <linux/delay.h>
 23#include <linux/dmaengine.h>
 24#include <linux/dma-mapping.h>
 25#include <linux/init.h>
 26#include <linux/module.h>
 27#include <linux/slab.h>
 28
 29#include "hsu.h"
 30
 31#define HSU_DMA_BUSWIDTHS				\
 32	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	|	\
 33	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
 34	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
 35	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)		|	\
 36	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		|	\
 37	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		|	\
 38	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)
 39
 40static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
 41{
 42	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
 43}
 44
 45static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
 46{
 47	u32 cr = HSU_CH_CR_CHA;
 48
 49	if (hsuc->direction == DMA_MEM_TO_DEV)
 50		cr &= ~HSU_CH_CR_CHD;
 51	else if (hsuc->direction == DMA_DEV_TO_MEM)
 52		cr |= HSU_CH_CR_CHD;
 53
 54	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
 55}
 56
 57static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
 58{
 59	struct dma_slave_config *config = &hsuc->config;
 60	struct hsu_dma_desc *desc = hsuc->desc;
 61	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
 62	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
 63	unsigned int i, count;
 64
 65	if (hsuc->direction == DMA_MEM_TO_DEV) {
 66		bsr = config->dst_maxburst;
 67		mtsr = config->dst_addr_width;
 68	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
 69		bsr = config->src_maxburst;
 70		mtsr = config->src_addr_width;
 71	}
 72
 73	hsu_chan_disable(hsuc);
 74
 75	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
 76	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
 77	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);
 78
 79	/* Set descriptors */
 80	count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC;
 81	for (i = 0; i < count; i++) {
 82		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
 83		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);
 84
 85		/* Prepare value for DCR */
 86		dcr |= HSU_CH_DCR_DESCA(i);
 87		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */
 88
 89		desc->active++;
 90	}
 91	/* Only for the last descriptor in the chain */
 92	dcr |= HSU_CH_DCR_CHSOD(count - 1);
 93	dcr |= HSU_CH_DCR_CHDI(count - 1);
 94
 95	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);
 96
 97	hsu_chan_enable(hsuc);
 98}
 99
100static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
101{
102	hsu_chan_disable(hsuc);
103	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
104}
105
106static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
107{
108	hsu_dma_chan_start(hsuc);
109}
110
111static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
112{
113	struct virt_dma_desc *vdesc;
114
115	/* Get the next descriptor */
116	vdesc = vchan_next_desc(&hsuc->vchan);
117	if (!vdesc) {
118		hsuc->desc = NULL;
119		return;
120	}
121
122	list_del(&vdesc->node);
123	hsuc->desc = to_hsu_dma_desc(vdesc);
124
125	/* Start the channel with a new descriptor */
126	hsu_dma_start_channel(hsuc);
127}
128
129static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
130{
131	unsigned long flags;
132	u32 sr;
133
134	spin_lock_irqsave(&hsuc->vchan.lock, flags);
135	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
136	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
137
138	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
139}
140
141irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
142{
143	struct hsu_dma_chan *hsuc;
144	struct hsu_dma_desc *desc;
145	unsigned long flags;
146	u32 sr;
147
148	/* Sanity check */
149	if (nr >= chip->hsu->nr_channels)
150		return IRQ_NONE;
151
152	hsuc = &chip->hsu->chan[nr];
153
154	/*
155	 * No matter what situation, need read clear the IRQ status
156	 * There is a bug, see Errata 5, HSD 2900918
157	 */
158	sr = hsu_dma_chan_get_sr(hsuc);
159	if (!sr)
160		return IRQ_NONE;
161
162	/* Timeout IRQ, need wait some time, see Errata 2 */
163	if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
164		udelay(2);
165
166	sr &= ~HSU_CH_SR_DESCTO_ANY;
167	if (!sr)
168		return IRQ_HANDLED;
169
170	spin_lock_irqsave(&hsuc->vchan.lock, flags);
171	desc = hsuc->desc;
172	if (desc) {
173		if (sr & HSU_CH_SR_CHE) {
174			desc->status = DMA_ERROR;
175		} else if (desc->active < desc->nents) {
176			hsu_dma_start_channel(hsuc);
177		} else {
178			vchan_cookie_complete(&desc->vdesc);
179			desc->status = DMA_COMPLETE;
180			hsu_dma_start_transfer(hsuc);
181		}
182	}
183	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
184
185	return IRQ_HANDLED;
186}
187EXPORT_SYMBOL_GPL(hsu_dma_irq);
188
189static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
190{
191	struct hsu_dma_desc *desc;
192
193	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
194	if (!desc)
195		return NULL;
196
197	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
198	if (!desc->sg) {
199		kfree(desc);
200		return NULL;
201	}
202
203	return desc;
204}
205
206static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
207{
208	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);
209
210	kfree(desc->sg);
211	kfree(desc);
212}
213
214static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
215		struct dma_chan *chan, struct scatterlist *sgl,
216		unsigned int sg_len, enum dma_transfer_direction direction,
217		unsigned long flags, void *context)
218{
219	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
220	struct hsu_dma_desc *desc;
221	struct scatterlist *sg;
222	unsigned int i;
223
224	desc = hsu_dma_alloc_desc(sg_len);
225	if (!desc)
226		return NULL;
227
228	for_each_sg(sgl, sg, sg_len, i) {
229		desc->sg[i].addr = sg_dma_address(sg);
230		desc->sg[i].len = sg_dma_len(sg);
231
232		desc->length += sg_dma_len(sg);
233	}
234
235	desc->nents = sg_len;
236	desc->direction = direction;
237	/* desc->active = 0 by kzalloc */
238	desc->status = DMA_IN_PROGRESS;
239
240	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
241}
242
243static void hsu_dma_issue_pending(struct dma_chan *chan)
244{
245	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
246	unsigned long flags;
247
248	spin_lock_irqsave(&hsuc->vchan.lock, flags);
249	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
250		hsu_dma_start_transfer(hsuc);
251	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
252}
253
254static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
255{
256	struct hsu_dma_desc *desc = hsuc->desc;
257	size_t bytes = 0;
258	int i;
259
260	for (i = desc->active; i < desc->nents; i++)
261		bytes += desc->sg[i].len;
262
263	i = HSU_DMA_CHAN_NR_DESC - 1;
264	do {
265		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
266	} while (--i >= 0);
267
268	return bytes;
269}
270
271static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
272	dma_cookie_t cookie, struct dma_tx_state *state)
273{
274	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
275	struct virt_dma_desc *vdesc;
276	enum dma_status status;
277	size_t bytes;
278	unsigned long flags;
279
280	status = dma_cookie_status(chan, cookie, state);
281	if (status == DMA_COMPLETE)
282		return status;
283
284	spin_lock_irqsave(&hsuc->vchan.lock, flags);
285	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
286	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
287		bytes = hsu_dma_active_desc_size(hsuc);
288		dma_set_residue(state, bytes);
289		status = hsuc->desc->status;
290	} else if (vdesc) {
291		bytes = to_hsu_dma_desc(vdesc)->length;
292		dma_set_residue(state, bytes);
293	}
294	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
295
296	return status;
297}
298
299static int hsu_dma_slave_config(struct dma_chan *chan,
300				struct dma_slave_config *config)
301{
302	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
303
304	/* Check if chan will be configured for slave transfers */
305	if (!is_slave_direction(config->direction))
306		return -EINVAL;
307
308	memcpy(&hsuc->config, config, sizeof(hsuc->config));
309
310	return 0;
311}
312
313static int hsu_dma_pause(struct dma_chan *chan)
314{
315	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
316	unsigned long flags;
317
318	spin_lock_irqsave(&hsuc->vchan.lock, flags);
319	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
320		hsu_chan_disable(hsuc);
321		hsuc->desc->status = DMA_PAUSED;
322	}
323	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
324
325	return 0;
326}
327
328static int hsu_dma_resume(struct dma_chan *chan)
329{
330	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
331	unsigned long flags;
332
333	spin_lock_irqsave(&hsuc->vchan.lock, flags);
334	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
335		hsuc->desc->status = DMA_IN_PROGRESS;
336		hsu_chan_enable(hsuc);
337	}
338	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
339
340	return 0;
341}
342
343static int hsu_dma_terminate_all(struct dma_chan *chan)
344{
345	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
346	unsigned long flags;
347	LIST_HEAD(head);
348
349	spin_lock_irqsave(&hsuc->vchan.lock, flags);
350
351	hsu_dma_stop_channel(hsuc);
352	if (hsuc->desc) {
353		hsu_dma_desc_free(&hsuc->desc->vdesc);
354		hsuc->desc = NULL;
355	}
356
357	vchan_get_all_descriptors(&hsuc->vchan, &head);
358	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
359	vchan_dma_desc_free_list(&hsuc->vchan, &head);
360
361	return 0;
362}
363
364static void hsu_dma_free_chan_resources(struct dma_chan *chan)
365{
366	vchan_free_chan_resources(to_virt_chan(chan));
367}
368
369int hsu_dma_probe(struct hsu_dma_chip *chip)
370{
371	struct hsu_dma *hsu;
372	void __iomem *addr = chip->regs + chip->offset;
373	unsigned short i;
374	int ret;
375
376	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
377	if (!hsu)
378		return -ENOMEM;
379
380	chip->hsu = hsu;
381
382	/* Calculate nr_channels from the IO space length */
383	hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;
384
385	hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
386				 sizeof(*hsu->chan), GFP_KERNEL);
387	if (!hsu->chan)
388		return -ENOMEM;
389
390	INIT_LIST_HEAD(&hsu->dma.channels);
391	for (i = 0; i < hsu->nr_channels; i++) {
392		struct hsu_dma_chan *hsuc = &hsu->chan[i];
393
394		hsuc->vchan.desc_free = hsu_dma_desc_free;
395		vchan_init(&hsuc->vchan, &hsu->dma);
396
397		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
398		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
399	}
400
401	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
402	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);
403
404	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;
405
406	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;
407
408	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
409	hsu->dma.device_tx_status = hsu_dma_tx_status;
410
411	hsu->dma.device_config = hsu_dma_slave_config;
412	hsu->dma.device_pause = hsu_dma_pause;
413	hsu->dma.device_resume = hsu_dma_resume;
414	hsu->dma.device_terminate_all = hsu_dma_terminate_all;
415
416	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
417	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
418	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
419	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
420
421	hsu->dma.dev = chip->dev;
422
423	ret = dma_async_device_register(&hsu->dma);
424	if (ret)
425		return ret;
426
427	dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
428	return 0;
429}
430EXPORT_SYMBOL_GPL(hsu_dma_probe);
431
432int hsu_dma_remove(struct hsu_dma_chip *chip)
433{
434	struct hsu_dma *hsu = chip->hsu;
435	unsigned short i;
436
437	dma_async_device_unregister(&hsu->dma);
438
439	for (i = 0; i < hsu->nr_channels; i++) {
440		struct hsu_dma_chan *hsuc = &hsu->chan[i];
441
442		tasklet_kill(&hsuc->vchan.task);
443	}
444
445	return 0;
446}
447EXPORT_SYMBOL_GPL(hsu_dma_remove);
448
449MODULE_LICENSE("GPL v2");
450MODULE_DESCRIPTION("High Speed UART DMA core driver");
451MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
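
On the dmaengine client side, both versions above are driven through the standard slave API. The sketch below is not taken from either listing; the function name, burst size and FIFO address parameter are illustrative assumptions. It shows the usual sequence for a UART RX transfer against this engine: configure the channel, prepare a slave scatter-gather descriptor, submit it, and kick the engine with issue-pending.

	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	/*
	 * Hypothetical helper: chan was obtained from dma_request_chan() and
	 * sgl has already been mapped for DMA by the caller.
	 */
	static int my_uart_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
				    unsigned int nents, dma_addr_t rx_fifo)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= rx_fifo,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
			.src_maxburst	= 16,	/* illustrative burst size */
		};
		struct dma_async_tx_descriptor *desc;
		int ret;

		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			return ret;

		desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -ENOMEM;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}

Since hsu_dma_slave_config() in both versions simply copies the configuration into the channel, the width and burst chosen here are what hsu_dma_chan_start() later writes into the BSR and MTSR registers when the transfer starts.
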