/*
 * sound/soc/samsung/idma.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * I2S0's Internal DMA driver
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "i2s.h"
#include "idma.h"
#include "dma.h"
#include "i2s-regs.h"

#define ST_RUNNING		(1<<0)
#define ST_OPENED		(1<<1)

static const struct snd_pcm_hardware idma_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		    SNDRV_PCM_INFO_BLOCK_TRANSFER |
		    SNDRV_PCM_INFO_MMAP |
		    SNDRV_PCM_INFO_MMAP_VALID |
		    SNDRV_PCM_INFO_PAUSE |
		    SNDRV_PCM_INFO_RESUME,
	.buffer_bytes_max = MAX_IDMA_BUFFER,
	.period_bytes_min = 128,
	.period_bytes_max = MAX_IDMA_PERIOD,
	.periods_min = 1,
	.periods_max = 2,
};

struct idma_ctrl {
	spinlock_t	lock;
	int		state;
	dma_addr_t	start;
	dma_addr_t	pos;
	dma_addr_t	end;
	dma_addr_t	period;
	dma_addr_t	periodsz;
	void		*token;
	void		(*cb)(void *dt, int bytes_xfer);
};

static struct idma_info {
	spinlock_t	lock;
	void		 __iomem  *regs;
	dma_addr_t	lp_tx_addr;
} idma;

static int idma_irq;

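/*
 * Read the I2S transfer counter (I2STRNCNT counts 32-bit words) and
 * translate it into the current bus address within the LP buffer.
 */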
static void idma_getpos(dma_addr_t *src)
{
	*src = idma.lp_tx_addr +
		(readl(idma.regs + I2STRNCNT) & 0xffffff) * 4;
}

static int idma_enqueue(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 val;

	spin_lock(&prtd->lock);
	prtd->token = (void *) substream;
	spin_unlock(&prtd->lock);

	/* Internal DMA Level0 Interrupt Address */
	val = idma.lp_tx_addr + prtd->periodsz;
	writel(val, idma.regs + I2SLVL0ADDR);

	/* Start address0 of I2S internal DMA operation. */
	val = idma.lp_tx_addr;
	writel(val, idma.regs + I2SSTR0);

	/*
	 * Transfer block size for I2S internal DMA.
	 * The transfer size must be set before the DMA operation starts.
	 */
	val = readl(idma.regs + I2SSIZE);
	val &= ~(I2SSIZE_TRNMSK << I2SSIZE_SHIFT);
	val |= (((runtime->dma_bytes >> 2) &
			I2SSIZE_TRNMSK) << I2SSIZE_SHIFT);
	writel(val, idma.regs + I2SSIZE);

	val = readl(idma.regs + I2SAHB);
	val |= AHB_INTENLVL0;
	writel(val, idma.regs + I2SAHB);

	return 0;
}

static void idma_setcallbk(struct snd_pcm_substream *substream,
				void (*cb)(void *, int))
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	spin_lock(&prtd->lock);
	prtd->cb = cb;
	spin_unlock(&prtd->lock);
}

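/*
 * Enable or disable the internal DMA engine and its level-0 interrupt
 * by updating the I2SAHB control register.
 */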
static void idma_control(int op)
{
	u32 val = readl(idma.regs + I2SAHB);

	spin_lock(&idma.lock);

	switch (op) {
	case LPAM_DMA_START:
		val |= (AHB_INTENLVL0 | AHB_DMAEN);
		break;
	case LPAM_DMA_STOP:
		val &= ~(AHB_INTENLVL0 | AHB_DMAEN);
		break;
	default:
		spin_unlock(&idma.lock);
		return;
	}

	writel(val, idma.regs + I2SAHB);
	spin_unlock(&idma.lock);
}

static void idma_done(void *id, int bytes_xfer)
{
	struct snd_pcm_substream *substream = id;
	struct idma_ctrl *prtd = substream->runtime->private_data;

	if (prtd && (prtd->state & ST_RUNNING))
		snd_pcm_period_elapsed(substream);
}

static int idma_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 mod = readl(idma.regs + I2SMOD);
	u32 ahb = readl(idma.regs + I2SAHB);

	ahb |= (AHB_DMARLD | AHB_INTMASK);
	mod |= MOD_TXS_IDMA;
	writel(ahb, idma.regs + I2SAHB);
	writel(mod, idma.regs + I2SMOD);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	runtime->dma_bytes = params_buffer_bytes(params);

	prtd->start = prtd->pos = runtime->dma_addr;
	prtd->period = params_periods(params);
	prtd->periodsz = params_period_bytes(params);
	prtd->end = runtime->dma_addr + runtime->dma_bytes;

	idma_setcallbk(substream, idma_done);

	return 0;
}

static int idma_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);

	return 0;
}

static int idma_prepare(struct snd_pcm_substream *substream)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	prtd->pos = prtd->start;

	/* flush the DMA channel */
	idma_control(LPAM_DMA_STOP);
	idma_enqueue(substream);

	return 0;
}

static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;
	int ret = 0;

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		prtd->state |= ST_RUNNING;
		idma_control(LPAM_DMA_START);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		prtd->state &= ~ST_RUNNING;
		idma_control(LPAM_DMA_STOP);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}

static snd_pcm_uframes_t
	idma_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;
	dma_addr_t src;
	unsigned long res;

	spin_lock(&prtd->lock);

	idma_getpos(&src);
	res = src - prtd->start;

	spin_unlock(&prtd->lock);

	return bytes_to_frames(substream->runtime, res);
}

static int idma_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;
	int ret;

	/* From snd_pcm_lib_mmap_iomem */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = io_remap_pfn_range(vma, vma->vm_start,
			(runtime->dma_addr + offset) >> PAGE_SHIFT,
			size, vma->vm_page_prot);

	return ret;
}

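/*
 * Level-0 interrupt handler: acknowledge the interrupt, advance the
 * level-0 address by one period (wrapping within the buffer) and
 * notify the PCM core through the registered callback.
 */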
static irqreturn_t iis_irq(int irqno, void *dev_id)
{
	struct idma_ctrl *prtd = (struct idma_ctrl *)dev_id;
	u32 iisahb, val, addr;

	iisahb  = readl(idma.regs + I2SAHB);

	val = (iisahb & AHB_LVL0INT) ? AHB_CLRLVL0INT : 0;

	if (val) {
		iisahb |= val;
		writel(iisahb, idma.regs + I2SAHB);

		addr = readl(idma.regs + I2SLVL0ADDR) - idma.lp_tx_addr;
		addr += prtd->periodsz;
		addr %= (u32)(prtd->end - prtd->start);
		addr += idma.lp_tx_addr;

		writel(addr, idma.regs + I2SLVL0ADDR);

		if (prtd->cb)
			prtd->cb(prtd->token, prtd->period);
	}

	return IRQ_HANDLED;
}

static int idma_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd;
	int ret;

	snd_soc_set_runtime_hwparams(substream, &idma_hardware);

	prtd = kzalloc(sizeof(struct idma_ctrl), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	ret = request_irq(idma_irq, iis_irq, 0, "i2s", prtd);
	if (ret < 0) {
		pr_err("failed to claim i2s irq, ret = %d\n", ret);
		kfree(prtd);
		return ret;
	}

	spin_lock_init(&prtd->lock);

	runtime->private_data = prtd;

	return 0;
}

static int idma_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;

	free_irq(idma_irq, prtd);

	if (!prtd)
		pr_err("idma_close called with prtd == NULL\n");

	kfree(prtd);

	return 0;
}

static struct snd_pcm_ops idma_ops = {
	.open		= idma_open,
	.close		= idma_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.trigger	= idma_trigger,
	.pointer	= idma_pointer,
	.mmap		= idma_mmap,
	.hw_params	= idma_hw_params,
	.hw_free	= idma_hw_free,
	.prepare	= idma_prepare,
};

static void idma_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (!substream)
		return;

	buf = &substream->dma_buffer;
	if (!buf->area)
		return;

	iounmap((void __iomem *)buf->area);

	buf->area = NULL;
	buf->addr = 0;
}

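/*
 * Map the fixed low-power playback buffer (lp_tx_addr) into the kernel
 * and register it as the substream's preallocated DMA buffer instead of
 * allocating one dynamically.
 */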
static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;

	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;

	/* Assign PCM buffer pointers */
	buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;
	buf->addr = idma.lp_tx_addr;
	buf->bytes = idma_hardware.buffer_bytes_max;
	buf->area = (unsigned char * __force)ioremap(buf->addr, buf->bytes);
	if (!buf->area)
		return -ENOMEM;

	return 0;
}

static int idma_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = preallocate_idma_buffer(pcm,
				SNDRV_PCM_STREAM_PLAYBACK);
	}

	return ret;
}

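/*
 * Exported entry point used to hand the IDMA layer its register base and
 * the bus address of the low-power playback buffer.
 */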
void idma_reg_addr_init(void __iomem *regs, dma_addr_t addr)
{
	spin_lock_init(&idma.lock);
	idma.regs = regs;
	idma.lp_tx_addr = addr;
}
EXPORT_SYMBOL_GPL(idma_reg_addr_init);

static struct snd_soc_platform_driver asoc_idma_platform = {
	.ops = &idma_ops,
	.pcm_new = idma_new,
	.pcm_free = idma_free,
};

static int asoc_idma_platform_probe(struct platform_device *pdev)
{
	idma_irq = platform_get_irq(pdev, 0);
	if (idma_irq < 0)
		return idma_irq;

	return devm_snd_soc_register_platform(&pdev->dev, &asoc_idma_platform);
}

static struct platform_driver asoc_idma_driver = {
	.driver = {
		.name = "samsung-idma",
	},

	.probe = asoc_idma_platform_probe,
};

module_platform_driver(asoc_idma_driver);

MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("Samsung ASoC IDMA Driver");
MODULE_LICENSE("GPL");