// SPDX-License-Identifier: GPL-2.0+
//
// idma.c - I2S0 internal DMA driver
//
// Copyright (c) 2011 Samsung Electronics Co., Ltd.
// http://www.samsung.com

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "i2s.h"
#include "idma.h"
#include "i2s-regs.h"

#define ST_RUNNING		(1<<0)
#define ST_OPENED		(1<<1)

static const struct snd_pcm_hardware idma_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.buffer_bytes_max = MAX_IDMA_BUFFER,
	.period_bytes_min = 128,
	.period_bytes_max = MAX_IDMA_PERIOD,
	.periods_min = 1,
	.periods_max = 2,
};

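/*
 * Per-substream control block: buffer bounds and current position
 * (as bus addresses), period geometry, run state, and the callback
 * invoked from the IRQ handler when a period completes.
 */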
struct idma_ctrl {
	spinlock_t	lock;
	int		state;
	dma_addr_t	start;
	dma_addr_t	pos;
	dma_addr_t	end;
	dma_addr_t	period;
	dma_addr_t	periodsz;
	void		*token;
	void		(*cb)(void *dt, int bytes_xfer);
};

static struct idma_info {
	spinlock_t	lock;
	void __iomem	*regs;
	dma_addr_t	lp_tx_addr;
} idma;

static int idma_irq;

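/*
 * Derive the current DMA position: I2STRNCNT holds the number of
 * 32-bit words transferred so far, so scale it to bytes and offset
 * it from the buffer base address.
 */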
static void idma_getpos(dma_addr_t *src)
{
	*src = idma.lp_tx_addr +
		(readl(idma.regs + I2STRNCNT) & 0xffffff) * 4;
}

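/*
 * Program the controller for one buffer: level-0 interrupt address at
 * the end of the first period, start address at the buffer base, total
 * transfer size in words, and the level-0 interrupt enabled.
 */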
static int idma_enqueue(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 val;

	spin_lock(&prtd->lock);
	prtd->token = (void *) substream;
	spin_unlock(&prtd->lock);

	/* Internal DMA Level0 Interrupt Address */
	val = idma.lp_tx_addr + prtd->periodsz;
	writel(val, idma.regs + I2SLVL0ADDR);

	/* Start address0 of I2S internal DMA operation. */
	val = idma.lp_tx_addr;
	writel(val, idma.regs + I2SSTR0);

	/*
	 * Transfer block size for I2S internal DMA.
	 * The transfer size must be set before the DMA operation starts.
	 */
	val = readl(idma.regs + I2SSIZE);
	val &= ~(I2SSIZE_TRNMSK << I2SSIZE_SHIFT);
	val |= (((runtime->dma_bytes >> 2) &
			I2SSIZE_TRNMSK) << I2SSIZE_SHIFT);
	writel(val, idma.regs + I2SSIZE);

	val = readl(idma.regs + I2SAHB);
	val |= AHB_INTENLVL0;
	writel(val, idma.regs + I2SAHB);

	return 0;
}

static void idma_setcallbk(struct snd_pcm_substream *substream,
				void (*cb)(void *, int))
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	spin_lock(&prtd->lock);
	prtd->cb = cb;
	spin_unlock(&prtd->lock);
}

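/*
 * Start or stop the internal DMA engine by toggling the DMA-enable
 * and level-0 interrupt-enable bits in the I2SAHB register.
 */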
static void idma_control(int op)
{
	u32 val = readl(idma.regs + I2SAHB);

	spin_lock(&idma.lock);

	switch (op) {
	case LPAM_DMA_START:
		val |= (AHB_INTENLVL0 | AHB_DMAEN);
		break;
	case LPAM_DMA_STOP:
		val &= ~(AHB_INTENLVL0 | AHB_DMAEN);
		break;
	default:
		spin_unlock(&idma.lock);
		return;
	}

	writel(val, idma.regs + I2SAHB);
	spin_unlock(&idma.lock);
}

static void idma_done(void *id, int bytes_xfer)
{
	struct snd_pcm_substream *substream = id;
	struct idma_ctrl *prtd = substream->runtime->private_data;

	if (prtd && (prtd->state & ST_RUNNING))
		snd_pcm_period_elapsed(substream);
}

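/*
 * Route the I2S TX path to the internal DMA, enable buffer auto-reload,
 * attach the preallocated iomem buffer to the runtime, record the period
 * and buffer geometry, and register idma_done() as the period callback.
 */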
static int idma_hw_params(struct snd_soc_component *component,
			  struct snd_pcm_substream *substream,
			  struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 mod = readl(idma.regs + I2SMOD);
	u32 ahb = readl(idma.regs + I2SAHB);

	ahb |= (AHB_DMARLD | AHB_INTMASK);
	mod |= MOD_TXS_IDMA;
	writel(ahb, idma.regs + I2SAHB);
	writel(mod, idma.regs + I2SMOD);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	runtime->dma_bytes = params_buffer_bytes(params);

	prtd->start = prtd->pos = runtime->dma_addr;
	prtd->period = params_periods(params);
	prtd->periodsz = params_period_bytes(params);
	prtd->end = runtime->dma_addr + runtime->dma_bytes;

	idma_setcallbk(substream, idma_done);

	return 0;
}

static int idma_hw_free(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);

	return 0;
}

static int idma_prepare(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	prtd->pos = prtd->start;

	/* flush the DMA channel */
	idma_control(LPAM_DMA_STOP);
	idma_enqueue(substream);

	return 0;
}

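/*
 * PCM trigger: update the ST_RUNNING flag and start or stop the
 * internal DMA accordingly.
 */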
static int idma_trigger(struct snd_soc_component *component,
			struct snd_pcm_substream *substream, int cmd)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;
	int ret = 0;

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		prtd->state |= ST_RUNNING;
		idma_control(LPAM_DMA_START);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		prtd->state &= ~ST_RUNNING;
		idma_control(LPAM_DMA_STOP);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}

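/*
 * Report the playback position in frames, derived from the hardware
 * transfer counter rather than from software bookkeeping.
 */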
static snd_pcm_uframes_t
idma_pointer(struct snd_soc_component *component,
	     struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;
	dma_addr_t src;
	unsigned long res;

	spin_lock(&prtd->lock);

	idma_getpos(&src);
	res = src - prtd->start;

	spin_unlock(&prtd->lock);

	return bytes_to_frames(substream->runtime, res);
}

static int idma_mmap(struct snd_soc_component *component,
		     struct snd_pcm_substream *substream,
		     struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;

	/* From snd_pcm_lib_mmap_iomem */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	return io_remap_pfn_range(vma, vma->vm_start,
			(runtime->dma_addr + offset) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}

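/*
 * Level-0 interrupt handler: acknowledge the interrupt, advance the
 * level-0 interrupt address by one period (wrapping at the buffer end),
 * and notify the PCM core through the registered callback.
 */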
static irqreturn_t iis_irq(int irqno, void *dev_id)
{
	struct idma_ctrl *prtd = (struct idma_ctrl *)dev_id;
	u32 iisahb, val, addr;

	iisahb = readl(idma.regs + I2SAHB);

	val = (iisahb & AHB_LVL0INT) ? AHB_CLRLVL0INT : 0;

	if (val) {
		iisahb |= val;
		writel(iisahb, idma.regs + I2SAHB);

		addr = readl(idma.regs + I2SLVL0ADDR) - idma.lp_tx_addr;
		addr += prtd->periodsz;
		addr %= (u32)(prtd->end - prtd->start);
		addr += idma.lp_tx_addr;

		writel(addr, idma.regs + I2SLVL0ADDR);

		if (prtd->cb)
			prtd->cb(prtd->token, prtd->period);
	}

	return IRQ_HANDLED;
}

static int idma_open(struct snd_soc_component *component,
		     struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd;
	int ret;

	snd_soc_set_runtime_hwparams(substream, &idma_hardware);

	prtd = kzalloc(sizeof(struct idma_ctrl), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	ret = request_irq(idma_irq, iis_irq, 0, "i2s", prtd);
	if (ret < 0) {
		pr_err("failed to claim i2s irq, ret = %d\n", ret);
		kfree(prtd);
		return ret;
	}

	spin_lock_init(&prtd->lock);

	runtime->private_data = prtd;

	return 0;
}

static int idma_close(struct snd_soc_component *component,
		      struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;

	free_irq(idma_irq, prtd);

	if (!prtd)
		pr_err("idma_close called with prtd == NULL\n");

	kfree(prtd);

	return 0;
}

static void idma_free(struct snd_soc_component *component,
		      struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (!substream)
		return;

	buf = &substream->dma_buffer;
	if (!buf->area)
		return;

	iounmap((void __iomem *)buf->area);

	buf->area = NULL;
	buf->addr = 0;
}

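/*
 * The playback buffer lives at a fixed bus address handed over by the
 * I2S driver (typically internal audio SRAM), so map it with ioremap()
 * instead of allocating DMA memory.
 */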
static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;

	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;

	/* Assign PCM buffer pointers */
	buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;
	buf->addr = idma.lp_tx_addr;
	buf->bytes = idma_hardware.buffer_bytes_max;
	buf->area = (unsigned char * __force)ioremap(buf->addr, buf->bytes);
	if (!buf->area)
		return -ENOMEM;

	return 0;
}

static int idma_new(struct snd_soc_component *component,
		    struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = preallocate_idma_buffer(pcm,
				SNDRV_PCM_STREAM_PLAYBACK);
	}

	return ret;
}

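/*
 * idma_reg_addr_init - hand over the register base and buffer address.
 * @regs: ioremapped I2S register base
 * @addr: bus address of the low-power playback buffer
 *
 * Called by the Samsung I2S DAI driver before this platform is used.
 */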
void idma_reg_addr_init(void __iomem *regs, dma_addr_t addr)
{
	spin_lock_init(&idma.lock);
	idma.regs = regs;
	idma.lp_tx_addr = addr;
}
EXPORT_SYMBOL_GPL(idma_reg_addr_init);

static const struct snd_soc_component_driver asoc_idma_platform = {
	.open		= idma_open,
	.close		= idma_close,
	.trigger	= idma_trigger,
	.pointer	= idma_pointer,
	.mmap		= idma_mmap,
	.hw_params	= idma_hw_params,
	.hw_free	= idma_hw_free,
	.prepare	= idma_prepare,
	.pcm_construct	= idma_new,
	.pcm_destruct	= idma_free,
};

static int asoc_idma_platform_probe(struct platform_device *pdev)
{
	idma_irq = platform_get_irq(pdev, 0);
	if (idma_irq < 0)
		return idma_irq;

	return devm_snd_soc_register_component(&pdev->dev, &asoc_idma_platform,
					       NULL, 0);
}

static struct platform_driver asoc_idma_driver = {
	.driver = {
		.name = "samsung-idma",
	},

	.probe = asoc_idma_platform_probe,
};

module_platform_driver(asoc_idma_driver);

MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("Samsung ASoC IDMA Driver");
MODULE_LICENSE("GPL");