// SPDX-License-Identifier: GPL-2.0-only
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"

#define DRV_NAME "pch-dma"

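/*
 * Each channel owns a 4-bit field in a control register (CTL0 for
 * channels 0-7, CTL3 for channels 8-11): bits [1:0] select the transfer
 * mode (disable, scatter-gather or one-shot) and bit 2 sets the
 * direction (1 = memory to device).  DMA_MASK_CTL0_MODE and
 * DMA_MASK_CTL2_MODE below cover the mode bits of every channel in the
 * respective register.
 */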
#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12

#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333

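/*
 * Number of descriptors preallocated per channel; writable via sysfs
 * (mode 0644) and overridable at module load time (e.g.
 * init_nr_desc_per_channel=128).  More descriptors are allocated on
 * demand with GFP_ATOMIC if the free list runs dry.
 */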
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct dma_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

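/*
 * CTL2 holds the per-channel interrupt-enable bits: channels 0-7 in bits
 * 0-7 and channels 8-11 in bits 16-19, hence the "+ 8" offset below.
 */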
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

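/*
 * Program the transfer direction for a channel with a read-modify-write
 * of CTL0 (channels 0-7) or CTL3 (channels 8-11), masking so that the
 * mode and direction bits of the other channels are left untouched.
 */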
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
						(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						  (DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						  (DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	return sts == DMA_STATUS_IDLE;
}

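/*
 * Kick off a transfer.  A single descriptor is programmed directly into
 * the channel registers and run in one-shot mode; a chained descriptor
 * list is started by pointing NEXT at the chain's DMA address and
 * switching to scatter-gather mode, letting the hardware walk the chain.
 */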
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(txd, &cb);
	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

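/*
 * tx_submit callback: if nothing is in flight the descriptor goes
 * straight onto the active list and starts the hardware, otherwise it
 * waits on the queue until pdc_advance_work() picks it up.
 */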
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);

	spin_lock(&pd_chan->lock);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = dma_pool_zalloc(pd->pool, flags, &addr);
	if (desc) {
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		dma_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

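/*
 * Build a descriptor chain for a slave transfer.  Clients pass their
 * struct pch_dma_slave (FIFO addresses and transfer width) via
 * chan->private.  Each descriptor's size field is the transfer count
 * ORed with the width encoding, the low bits of "next" select
 * follow/end with or without an IRQ, and chained descriptors link
 * through their txd.phys DMA addresses.
 */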
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(struct tasklet_struct *t)
{
	struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

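/*
 * Interrupt handler: STS0 carries the IRQ and error flags for channels
 * 0-7, STS2 for channels 8-11.  Completion work is deferred to the
 * per-channel tasklet; writing the read status value back clears the
 * serviced bits.
 */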
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

static void __maybe_unused pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void __maybe_unused pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int __maybe_unused pch_dma_suspend(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_save_regs(pd);

	return 0;
}

static int __maybe_unused pch_dma_resume(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}

static int pch_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);
	pd->dma.dev = &pdev->dev;

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	dma_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}

static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		dma_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4 }, /* SPI */
	{ 0, },
};

static SIMPLE_DEV_PM_OPS(pch_dma_pm_ops, pch_dma_suspend, pch_dma_resume);

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
	.driver.pm	= &pch_dma_pm_ops,
};

module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);