/*
 * intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 * Copyright (C) 2008-10 Intel Corp
 * Author: Vinod Koul <vinod.koul@intel.com>
 * The driver design is based on dw_dmac driver
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#include <linux/module.h>

#include "dmaengine.h"

#define MAX_CHAN	4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830
#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE		0x10
#define LNW_PERIPHRAL_STATUS		0x0
#define LNW_PERIPHRAL_MASK		0x8

struct intel_mid_dma_probe_info {
	u8 max_chan;
	u8 ch_base;
	u16 block_size;
	u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})
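
/*
 * Note: INFO() yields the address of a compound literal cast to
 * kernel_ulong_t so it can sit in the driver_data field of a
 * struct pci_device_id entry; the probe routine casts it back. As a
 * worked example from the ID table at the bottom of this file,
 * INFO(2, 6, 4095, 0x200020) describes a controller with two channels
 * based at hardware channel 6, a 4095-item max block, and peripheral
 * interrupt mask bits 0x200020.
 */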

/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index - convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none)
 */
static int get_ch_index(int *status, unsigned int base)
{
	int i;
	for (i = 0; i < MAX_CHAN; i++) {
		if (*status & (1 << (i + base))) {
			*status = *status & ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
			return i;
		}
	}
	return -1;
}
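
/*
 * For example (values taken from the DMAC1 entry in the PCI ID table
 * below): with chan_base 6, a raw status of 0x40 has bit (0 + 6) set,
 * so the call clears that bit and returns channel index 0; a second
 * call on the now-zero status returns -1.
 */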

/**
 * get_block_ts - calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width calculate the DMA transaction length in data items
 * return the data items, or 0xFFFF if it exceeds the max length for a block
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		byte_width = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		byte_width = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		byte_width = 4;
		break;
	}

	block_ts = len/byte_width;
	if (block_ts > block_size)
		block_ts = 0xFFFF;
	return block_ts;
}
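
/*
 * Worked example: a 100-byte transfer at DMA_SLAVE_BUSWIDTH_2_BYTES is
 * 100 / 2 = 50 data items. With the DMAC2 block_size of 2047 (see the
 * PCI ID table), a 4096-byte transfer at the same width gives 2048
 * items, which exceeds the limit and is therefore reported as 0xFFFF.
 */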

/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_periphral_intr - mask the peripheral interrupt
 * @mid: dma device for which masking is required
 *
 * Masks the DMA peripheral interrupt
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_mask_periphral_intr(struct middma_device *mid)
{
	u32 pimr;

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr |= mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * dmac1_unmask_periphral_intr - unmask the peripheral interrupt
 * @midc: dma channel for which unmasking is required
 *
 * Unmasks the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr &= ~mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * enable_dma_interrupt - enable the peripheral interrupt
 * @midc: dma channel for which interrupts are to be enabled
 *
 * Enable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	dmac1_unmask_periphral_intr(midc);

	/*en ch interrupts*/
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/**
 * disable_dma_interrupt - disable the peripheral interrupt
 * @midc: dma channel for which interrupts are to be disabled
 *
 * Disable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/*Check LPE PISR, make sure fwd is disabled*/
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}
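
/*
 * The MASK_* registers appear to use a write-enable scheme: the ISR
 * below masks a channel by writing its status bits shifted left by
 * INT_MASK_WE (the write-enable half) with the low enable bits clear,
 * while UNMASK_INTR_REG()/MASK_INTR_REG() presumably build the
 * matching set/clear patterns in intel_mid_dma_regs.h.
 */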

/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get - get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;
	struct intel_mid_dma_desc *ret = NULL;

	spin_lock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&midc->lock);
	return ret;
}

/**
 * midc_desc_put - put a descriptor
 * @midc: dma channel for which descriptor is required
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&midc->lock);
		list_add_tail(&desc->desc_node, &midc->free_list);
		spin_unlock_bh(&midc->lock);
	}
}
/**
 * midc_dostart - begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* channel is idle */
	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
		/*error*/
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}
	midc->busy = true;
	/*write registers and en*/
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->lli_phys, midc->ch_regs + LLP);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);
	first->status = DMA_IN_PROGRESS;

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}

/**
 * midc_descriptor_complete - process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
		struct intel_mid_dma_desc *desc)
		__releases(&midc->lock) __acquires(&midc->lock)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	struct intel_mid_dma_lli *llitem;
	void *param_txd = NULL;

	dma_cookie_complete(txd);
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	if (desc->lli != NULL) {
		/*clear the DONE bit of completed LLI in memory*/
		llitem = desc->lli + desc->current_lli;
		llitem->ctl_hi &= CLEAR_DONE;
		if (desc->current_lli < desc->lli_length-1)
			(desc->current_lli)++;
		else
			desc->current_lli = 0;
	}
	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	}
	if (midc->raw_tfr) {
		desc->status = DMA_COMPLETE;
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
			desc->lli = NULL;
		}
		list_move(&desc->desc_node, &midc->free_list);
		midc->busy = false;
	}
	spin_lock_bh(&midc->lock);

}
/**
 * midc_scan_descriptors - check the descriptors in channel
 *				mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * that are complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
				struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

	/*tx is complete*/
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS)
			midc_descriptor_complete(midc, desc);
	}
	return;
}
/**
 * midc_lli_fill_sg - Helper function to convert
 *			SG list to Linked List Items.
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned int flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_bloc_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_bloc_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/*Populate CTL_LOW and LLI values*/
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
			/*Check for circular list, otherwise terminate LLI to ZERO*/
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/*Populate CTL_HI values*/
		ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
							desc->width,
							midc->dma->block_size);
		/*Populate SAR and DAR values*/
		sg_phy_addr = sg_dma_address(sg);
		if (desc->dirn == DMA_MEM_TO_DEV) {
			lli_bloc_desc->sar = sg_phy_addr;
			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
		} else if (desc->dirn == DMA_DEV_TO_MEM) {
			lli_bloc_desc->sar = mids->dma_slave.src_addr;
			lli_bloc_desc->dar = sg_phy_addr;
		}
		/*Copy values into block descriptor in system memory*/
		lli_bloc_desc->llp = lli_next;
		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;

		lli_bloc_desc++;
	}
	/*Copy very first LLI values to descriptor*/
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}
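
/*
 * To illustrate the resulting layout: for a three-entry SG list the
 * pool holds LLI[0..2] contiguously starting at lli_phys, LLI[0].llp
 * points at LLI[1], LLI[1].llp at LLI[2], and LLI[2].llp is either 0
 * (chain terminated, llp_src_en/llp_dst_en cleared) or back to
 * lli_phys when DMA_PREP_CIRCULAR_LIST is set.
 */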

/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit - callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&midc->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	else
		list_add_tail(&desc->desc_node, &midc->queue);

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
}
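
/*
 * Note on the submit path above: the first descriptor on an idle
 * channel goes straight onto active_list, later ones onto the queue
 * list; midc_dostart() is invoked either way but bails out when the
 * channel is already busy, leaving queued work to be advanced by the
 * tasklet and issue_pending.
 */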

/**
 * intel_mid_dma_issue_pending - callback to issue pending txn
 * @chan: chan where pending transaction needs to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}

/**
 * intel_mid_dma_tx_status - Return status of txn
 * @chan: chan where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE) {
		spin_lock_bh(&midc->lock);
		midc_scan_descriptors(to_middma_device(chan->device), midc);
		spin_unlock_bh(&midc->lock);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	return ret;
}

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config *slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}
/**
 * intel_mid_dma_device_control - DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
	enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	/*Suspend and disable the channel*/
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
			desc->lli = NULL;
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}


/**
 * intel_mid_dma_prep_memcpy - Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destination address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only
 * The peripheral txn details should be filled in the slave structure properly
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum dma_slave_buswidth width;

	pr_debug("MDMA: Prep for memcpy\n");
	BUG_ON(!chan);
	if (!len)
		return NULL;

	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
			mids->cfg_mode, mids->dma_slave.direction,
			mids->hs_mode, mids->dma_slave.src_addr_width);

	/*calculate CFG_LO*/
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/*calculate CFG_HI*/
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/*SW HS only*/
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /*default value*/
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /*default value*/
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/*calculate CTL_HI*/
	ctl_hi.ctlx.reser = 0;
	ctl_hi.ctlx.done = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);
	/*calculate CTL_LO*/
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	/*
	 * Here we need some translation from "enum dma_slave_buswidth"
	 * to the format for our dma controller
	 *		standard	intel_mid_dmac's format
	 *		1 Byte		0b000
	 *		2 Bytes		0b001
	 *		4 Bytes		0b010
	 */
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
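	/* e.g. DMA_SLAVE_BUSWIDTH_4_BYTES is 4, and 4 / 2 == 2 == 0b010;
	 * the divide-by-two mapping holds exactly for the 1/2/4 byte
	 * widths tabulated above */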

	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
		}
	}

	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

	enable_dma_interrupt(midc);

	desc = midc_desc_get(midc);
	if (desc == NULL)
		goto err_desc_get;
	desc->sar = src;
	desc->dar = dest;
	desc->len = len;
	desc->cfg_hi = cfg_hi.cfg_hi;
	desc->cfg_lo = cfg_lo.cfg_lo;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->ctl_hi = ctl_hi.ctl_hi;
	desc->width = width;
	desc->dirn = mids->dma_slave.direction;
	desc->lli_phys = 0;
	desc->lli = NULL;
	desc->lli_pool = NULL;
	return &desc->txd;

err_desc_get:
	pr_err("ERR_MDMA: Failed to get desc\n");
	midc_desc_put(midc, desc);
	return NULL;
}
/**
 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (ignored)
 *
 * Prepares LLI based peripheral transfer
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct intel_mid_dma_chan *midc = NULL;
	struct intel_mid_dma_slave *mids = NULL;
	struct intel_mid_dma_desc *desc = NULL;
	struct dma_async_tx_descriptor *txd = NULL;
	union intel_mid_dma_ctl_lo ctl_lo;

	pr_debug("MDMA: Prep for slave SG\n");

	if (!sg_len) {
		pr_err("MDMA: Invalid SG length\n");
		return NULL;
	}
	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	if (!midc->dma->pimr_mask) {
		/* We can still handle sg list with only one item */
		if (sg_len == 1) {
			txd = intel_mid_dma_prep_memcpy(chan,
						mids->dma_slave.dst_addr,
						mids->dma_slave.src_addr,
						sg_dma_len(sgl),
						flags);
			return txd;
		} else {
			pr_warn("MDMA: SG list is not supported by this controller\n");
			return NULL;
		}
	}

	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
			sg_len, direction, flags);

	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
	if (NULL == txd) {
		pr_err("MDMA: Prep memcpy failed\n");
		return NULL;
	}

	desc = to_intel_mid_dma_desc(txd);
	desc->dirn = direction;
	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_lo.ctlx.llp_dst_en = 1;
	ctl_lo.ctlx.llp_src_en = 1;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->lli_length = sg_len;
	desc->current_lli = 0;
	/* DMA coherent memory pool for LLI descriptors*/
	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
					midc->dma->pdev,
					(sizeof(struct intel_mid_dma_lli)*sg_len),
					32, 0);
	if (NULL == desc->lli_pool) {
		pr_err("MID_DMA:LLI pool create failed\n");
		return NULL;
	}

	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
	if (!desc->lli) {
		pr_err("MID_DMA: LLI alloc failed\n");
		pci_pool_destroy(desc->lli_pool);
		return NULL;
	}

	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
	if (flags & DMA_PREP_INTERRUPT) {
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				midc->dma_base + MASK_BLOCK);
		pr_debug("MDMA:Enabled Block interrupt\n");
	}
	return &desc->txd;
}
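
/*
 * Rough sketch of how a client would drive this path through the
 * generic dmaengine API (the calls below are standard dmaengine
 * helpers, not part of this file; error handling omitted):
 *
 *	chan = dma_request_channel(mask, filter_fn, filter_param);
 *	dmaengine_slave_config(chan, &mid_slave->dma_slave);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * dmaengine_slave_config() lands in intel_mid_dma_device_control()
 * above with DMA_SLAVE_CONFIG (this driver expects the config to be
 * embedded in a struct intel_mid_dma_slave), and
 * dmaengine_prep_slave_sg() lands in this function.
 */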

/**
 * intel_mid_dma_free_chan_resources - Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;

	if (true == midc->busy) {
		/*trying to free ch in use!!!!!*/
		pr_err("ERR_MDMA: trying to free ch in use\n");
	}
	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
	pm_runtime_put(&mid->pdev->dev);
}

/**
 * intel_mid_dma_alloc_chan_resources - Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Returns the number of descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc;
	dma_addr_t phys;
	int i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(&mid->pdev->dev)) {
			pr_err("ERR_MDMA: resume failed");
			return -EFAULT;
		}
	}

	/* ASSERT: channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/*ch is not idle*/
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	dma_cookie_init(chan);

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
			return -ENOMEM;
			/*check*/
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = true;
	midc->busy = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}

/**
 * midc_handle_error - Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptors for error
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet - DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device *mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status, raw_tfr, raw_block;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
	status = raw_tfr | raw_block;
	status &= mid->intr_mask;
	while (status) {
		/*txn interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		midc->raw_tfr = raw_tfr;
		midc->raw_block = raw_block;
		spin_lock_bh(&midc->lock);
		/*clear these interrupts first*/
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		if (raw_block) {
			iowrite32((1 << midc->ch_id),
				mid->dma_base + CLEAR_BLOCK);
		}
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		if (raw_block) {
			iowrite32(UNMASK_INTR_REG(midc->ch_id),
					mid->dma_base + MASK_BLOCK);
		}
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/*err interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting tasklet...\n");
	return;
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA:in tasklet1...\n");
	return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA:in tasklet2...\n");
	return dma_tasklet(data);
}

/**
 * intel_mid_dma_interrupt - DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt; if so, schedule the tasklet,
 * otherwise ignore
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
	u32 tfr_status, err_status;
	int call_tasklet = 0;

	tfr_status = ioread32(mid->dma_base + RAW_TFR);
	err_status = ioread32(mid->dma_base + RAW_ERR);
	if (!tfr_status && !err_status)
		return IRQ_NONE;

	/*DMA Interrupt*/
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
	tfr_status &= mid->intr_mask;
	if (tfr_status) {
		/*need to disable intr*/
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
		call_tasklet = 1;
	}
	err_status &= mid->intr_mask;
	if (err_status) {
		iowrite32((err_status << INT_MASK_WE),
				mid->dma_base + MASK_ERR);
		call_tasklet = 1;
	}
	if (call_tasklet)
		tasklet_schedule(&mid->tasklet);

	return IRQ_HANDLED;
}
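
/*
 * Interrupt flow, for reference: the ISR above only checks that the
 * interrupt is ours, masks the interrupting channels, and schedules
 * the tasklet; dma_tasklet() then clears the raw TFR/BLOCK/ERR
 * status, scans the descriptor lists, and unmasks the channels again
 * once processing is done.
 */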

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

/**
 * mid_setup_dma - Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller and its channels, register the
 * channels with the DMA engine, and set up the ISR.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device *dma = pci_get_drvdata(pdev);
	int err, i;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
					LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
			err = -ENOMEM;
			goto err_ioremap;
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channels for this controller\n", dma->max_chan);
	/*init CH structures*/
	dma->intr_mask = 0;
	dma->state = RUNNING;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		dma_cookie_init(&midch->chan);
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);
		/*mask interrupts*/
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/*init dma structure*/
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;

	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/*enable dma cntrl*/
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/*register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
			goto err_irq;
	}
	/*register device w/ engine*/
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	if (dma->mask_reg)
		iounmap(dma->mask_reg);
err_ioremap:
	pci_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;

}

/**
 * middma_shutdown - Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove
 * Unregister DMA controller, clear all structures and free interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
	return;
}

/**
 * intel_mid_dma_probe - PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call mid_setup_dma to complete controller and channel initialization
 */
static int intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}

/**
 * intel_mid_dma_remove - PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call middma_shutdown to complete controller and channel cleanup
 */
static void intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* Power Management */
/*
 * dma_suspend - PCI suspend function
 * @dev: device structure
 *
 * This function is called by OS when a power event occurs
 */
static int dma_suspend(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	int i;
	struct middma_device *device = pci_get_drvdata(pci);
	pr_debug("MDMA: dma_suspend called\n");

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}
	dmac1_mask_periphral_intr(device);
	device->state = SUSPENDED;
	pci_save_state(pci);
	pci_disable_device(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

/**
 * dma_resume - PCI resume function
 * @dev: device structure
 *
 * This function is called by OS when a power event occurs
 */
int dma_resume(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	int ret;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_resume called\n");
	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
		return ret;
	}
	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = SUSPENDED;
	return 0;
}

static int dma_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}

	return 0;
}

/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
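
/*
 * Reading an entry: INFO(2, 6, 4095, 0x200020) for INTEL_MID_DMAC1_ID
 * means two channels whose hardware IDs start at 6 (so bits 6 and 7
 * in the controller interrupt mask), transfers capped at 4095 data
 * items per block, and a non-zero PIMR mask marking it as a
 * DMAC1-family device with the peripheral interrupt mask register
 * mapped at LNW_PERIPHRAL_MASK_BASE.
 */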

static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
	.suspend = dma_suspend,
	.resume = dma_resume,
};

static struct pci_driver intel_mid_dma_pci_driver = {
	.name		=	"Intel MID DMA",
	.id_table	=	intel_mid_dma_ids,
	.probe		=	intel_mid_dma_probe,
	.remove		=	intel_mid_dma_remove,
#ifdef CONFIG_PM
	.driver = {
		.pm = &intel_mid_dma_pm,
	},
#endif
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci_driver);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
1/*
2 * intel_mid_dma.c - Intel Langwell DMA Drivers
3 *
4 * Copyright (C) 2008-10 Intel Corp
5 * Author: Vinod Koul <vinod.koul@intel.com>
6 * The driver design is based on dw_dmac driver
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 *
24 *
25 */
26#include <linux/pci.h>
27#include <linux/interrupt.h>
28#include <linux/pm_runtime.h>
29#include <linux/intel_mid_dma.h>
30
31#define MAX_CHAN 4 /*max ch across controllers*/
32#include "intel_mid_dma_regs.h"
33
34#define INTEL_MID_DMAC1_ID 0x0814
35#define INTEL_MID_DMAC2_ID 0x0813
36#define INTEL_MID_GP_DMAC2_ID 0x0827
37#define INTEL_MFLD_DMAC1_ID 0x0830
38#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008
39#define LNW_PERIPHRAL_MASK_SIZE 0x10
40#define LNW_PERIPHRAL_STATUS 0x0
41#define LNW_PERIPHRAL_MASK 0x8
42
43struct intel_mid_dma_probe_info {
44 u8 max_chan;
45 u8 ch_base;
46 u16 block_size;
47 u32 pimr_mask;
48};
49
50#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
51 ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \
52 .max_chan = (_max_chan), \
53 .ch_base = (_ch_base), \
54 .block_size = (_block_size), \
55 .pimr_mask = (_pimr_mask), \
56 })
57
58/*****************************************************************************
59Utility Functions*/
60/**
61 * get_ch_index - convert status to channel
62 * @status: status mask
63 * @base: dma ch base value
64 *
65 * Modify the status mask and return the channel index needing
66 * attention (or -1 if neither)
67 */
68static int get_ch_index(int *status, unsigned int base)
69{
70 int i;
71 for (i = 0; i < MAX_CHAN; i++) {
72 if (*status & (1 << (i + base))) {
73 *status = *status & ~(1 << (i + base));
74 pr_debug("MDMA: index %d New status %x\n", i, *status);
75 return i;
76 }
77 }
78 return -1;
79}
80
81/**
82 * get_block_ts - calculates dma transaction length
83 * @len: dma transfer length
84 * @tx_width: dma transfer src width
85 * @block_size: dma controller max block size
86 *
87 * Based on src width calculate the DMA trsaction length in data items
88 * return data items or FFFF if exceeds max length for block
89 */
90static int get_block_ts(int len, int tx_width, int block_size)
91{
92 int byte_width = 0, block_ts = 0;
93
94 switch (tx_width) {
95 case DMA_SLAVE_BUSWIDTH_1_BYTE:
96 byte_width = 1;
97 break;
98 case DMA_SLAVE_BUSWIDTH_2_BYTES:
99 byte_width = 2;
100 break;
101 case DMA_SLAVE_BUSWIDTH_4_BYTES:
102 default:
103 byte_width = 4;
104 break;
105 }
106
107 block_ts = len/byte_width;
108 if (block_ts > block_size)
109 block_ts = 0xFFFF;
110 return block_ts;
111}
112
113/*****************************************************************************
114DMAC1 interrupt Functions*/
115
116/**
117 * dmac1_mask_periphral_intr - mask the periphral interrupt
118 * @midc: dma channel for which masking is required
119 *
120 * Masks the DMA periphral interrupt
121 * this is valid for DMAC1 family controllers only
122 * This controller should have periphral mask registers already mapped
123 */
124static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
125{
126 u32 pimr;
127 struct middma_device *mid = to_middma_device(midc->chan.device);
128
129 if (mid->pimr_mask) {
130 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
131 pimr |= mid->pimr_mask;
132 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
133 }
134 return;
135}
136
137/**
138 * dmac1_unmask_periphral_intr - unmask the periphral interrupt
139 * @midc: dma channel for which masking is required
140 *
141 * UnMasks the DMA periphral interrupt,
142 * this is valid for DMAC1 family controllers only
143 * This controller should have periphral mask registers already mapped
144 */
145static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
146{
147 u32 pimr;
148 struct middma_device *mid = to_middma_device(midc->chan.device);
149
150 if (mid->pimr_mask) {
151 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
152 pimr &= ~mid->pimr_mask;
153 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
154 }
155 return;
156}
157
158/**
159 * enable_dma_interrupt - enable the periphral interrupt
160 * @midc: dma channel for which enable interrupt is required
161 *
162 * Enable the DMA periphral interrupt,
163 * this is valid for DMAC1 family controllers only
164 * This controller should have periphral mask registers already mapped
165 */
166static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
167{
168 dmac1_unmask_periphral_intr(midc);
169
170 /*en ch interrupts*/
171 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
172 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
173 return;
174}
175
176/**
177 * disable_dma_interrupt - disable the periphral interrupt
178 * @midc: dma channel for which disable interrupt is required
179 *
180 * Disable the DMA periphral interrupt,
181 * this is valid for DMAC1 family controllers only
182 * This controller should have periphral mask registers already mapped
183 */
184static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
185{
186 /*Check LPE PISR, make sure fwd is disabled*/
187 dmac1_mask_periphral_intr(midc);
188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
190 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
191 return;
192}
193
194/*****************************************************************************
195DMA channel helper Functions*/
196/**
197 * mid_desc_get - get a descriptor
198 * @midc: dma channel for which descriptor is required
199 *
200 * Obtain a descriptor for the channel. Returns NULL if none are free.
201 * Once the descriptor is returned it is private until put on another
202 * list or freed
203 */
204static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
205{
206 struct intel_mid_dma_desc *desc, *_desc;
207 struct intel_mid_dma_desc *ret = NULL;
208
209 spin_lock_bh(&midc->lock);
210 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
211 if (async_tx_test_ack(&desc->txd)) {
212 list_del(&desc->desc_node);
213 ret = desc;
214 break;
215 }
216 }
217 spin_unlock_bh(&midc->lock);
218 return ret;
219}
220
221/**
222 * mid_desc_put - put a descriptor
223 * @midc: dma channel for which descriptor is required
224 * @desc: descriptor to put
225 *
226 * Return a descriptor from lwn_desc_get back to the free pool
227 */
228static void midc_desc_put(struct intel_mid_dma_chan *midc,
229 struct intel_mid_dma_desc *desc)
230{
231 if (desc) {
232 spin_lock_bh(&midc->lock);
233 list_add_tail(&desc->desc_node, &midc->free_list);
234 spin_unlock_bh(&midc->lock);
235 }
236}
237/**
238 * midc_dostart - begin a DMA transaction
239 * @midc: channel for which txn is to be started
240 * @first: first descriptor of series
241 *
242 * Load a transaction into the engine. This must be called with midc->lock
243 * held and bh disabled.
244 */
245static void midc_dostart(struct intel_mid_dma_chan *midc,
246 struct intel_mid_dma_desc *first)
247{
248 struct middma_device *mid = to_middma_device(midc->chan.device);
249
250 /* channel is idle */
251 if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
252 /*error*/
253 pr_err("ERR_MDMA: channel is busy in start\n");
254 /* The tasklet will hopefully advance the queue... */
255 return;
256 }
257 midc->busy = true;
258 /*write registers and en*/
259 iowrite32(first->sar, midc->ch_regs + SAR);
260 iowrite32(first->dar, midc->ch_regs + DAR);
261 iowrite32(first->lli_phys, midc->ch_regs + LLP);
262 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
263 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
264 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
265 iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
266 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
267 (int)first->sar, (int)first->dar, first->cfg_hi,
268 first->cfg_lo, first->ctl_hi, first->ctl_lo);
269 first->status = DMA_IN_PROGRESS;
270
271 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
272}
273
274/**
275 * midc_descriptor_complete - process completed descriptor
276 * @midc: channel owning the descriptor
277 * @desc: the descriptor itself
278 *
279 * Process a completed descriptor and perform any callbacks upon
280 * the completion. The completion handling drops the lock during the
281 * callbacks but must be called with the lock held.
282 */
283static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
284 struct intel_mid_dma_desc *desc)
285{
286 struct dma_async_tx_descriptor *txd = &desc->txd;
287 dma_async_tx_callback callback_txd = NULL;
288 struct intel_mid_dma_lli *llitem;
289 void *param_txd = NULL;
290
291 midc->completed = txd->cookie;
292 callback_txd = txd->callback;
293 param_txd = txd->callback_param;
294
295 if (desc->lli != NULL) {
296 /*clear the DONE bit of completed LLI in memory*/
297 llitem = desc->lli + desc->current_lli;
298 llitem->ctl_hi &= CLEAR_DONE;
299 if (desc->current_lli < desc->lli_length-1)
300 (desc->current_lli)++;
301 else
302 desc->current_lli = 0;
303 }
304 spin_unlock_bh(&midc->lock);
305 if (callback_txd) {
306 pr_debug("MDMA: TXD callback set ... calling\n");
307 callback_txd(param_txd);
308 }
309 if (midc->raw_tfr) {
310 desc->status = DMA_SUCCESS;
311 if (desc->lli != NULL) {
312 pci_pool_free(desc->lli_pool, desc->lli,
313 desc->lli_phys);
314 pci_pool_destroy(desc->lli_pool);
315 }
316 list_move(&desc->desc_node, &midc->free_list);
317 midc->busy = false;
318 }
319 spin_lock_bh(&midc->lock);
320
321}
322/**
323 * midc_scan_descriptors - check the descriptors in channel
324 * mark completed when tx is completete
325 * @mid: device
326 * @midc: channel to scan
327 *
328 * Walk the descriptor chain for the device and process any entries
329 * that are complete.
330 */
331static void midc_scan_descriptors(struct middma_device *mid,
332 struct intel_mid_dma_chan *midc)
333{
334 struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
335
336 /*tx is complete*/
337 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
338 if (desc->status == DMA_IN_PROGRESS)
339 midc_descriptor_complete(midc, desc);
340 }
341 return;
342 }

/**
 * midc_lli_fill_sg - Helper function to convert SG list to Linked List Items
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned int flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_block_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_block_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/*Populate CTL_LOW and LLI values*/
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
			/*Check for circular list; otherwise terminate the LLI chain with zero*/
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/*Populate CTL_HI values*/
		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
							desc->width,
							midc->dma->block_size);
		/*Populate SAR and DAR values*/
		sg_phy_addr = sg_phys(sg);
		if (desc->dirn == DMA_TO_DEVICE) {
			lli_block_desc->sar = sg_phy_addr;
			lli_block_desc->dar = mids->dma_slave.dst_addr;
		} else if (desc->dirn == DMA_FROM_DEVICE) {
			lli_block_desc->sar = mids->dma_slave.src_addr;
			lli_block_desc->dar = sg_phy_addr;
		}
		/*Copy values into block descriptor in system memory*/
		lli_block_desc->llp = lli_next;
		lli_block_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_block_desc->ctl_hi = ctl_hi.ctl_hi;

		lli_block_desc++;
	}
	/*Copy very first LLI values to descriptor*/
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}
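
/*
 * Illustrative layout of the LLI chain built above: each item's llp
 * field holds the physical address of the next item; the final item
 * either points back to desc->lli_phys (DMA_PREP_CIRCULAR_LIST) or
 * holds 0 with the llp_src_en/llp_dst_en bits cleared to end the chain.
 *
 *	lli_phys -> [item0] -> [item1] -> ... -> [itemN] -> 0  (linear)
 *	lli_phys -> [item0] -> [item1] -> ... -> [itemN]
 *	    ^                                       |
 *	    +---------------------------------------+          (circular)
 */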
/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit - callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&midc->lock);
	cookie = midc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	midc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	else
		list_add_tail(&desc->desc_node, &midc->queue);

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
}
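
/*
 * A minimal client-side sketch (illustrative only, not part of this
 * driver): once a prep_* call has returned a descriptor, a slave
 * driver typically submits and kicks it like this; tx_submit() here
 * lands in intel_mid_dma_tx_submit() above.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */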

/**
 * intel_mid_dma_issue_pending - callback to issue pending txn
 * @chan: chan where pending transactions need to be checked and submitted
 *
 * Scan for and issue any pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}

/**
 * intel_mid_dma_tx_status - Return status of txn
 * @chan: chan where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = midc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		midc_scan_descriptors(to_middma_device(chan->device), midc);

		last_complete = midc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (txstate) {
		txstate->last = last_complete;
		txstate->used = last_used;
		txstate->residue = 0;
	}
	return ret;
}
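
/*
 * Illustrative polling sketch (generic dmaengine client code, not part
 * of this driver): completion of a cookie can be checked through the
 * standard helper, which ends up in intel_mid_dma_tx_status() above.
 *
 *	enum dma_status status;
 *
 *	do {
 *		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	} while (status == DMA_IN_PROGRESS);
 */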

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config *slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}

/**
 * intel_mid_dma_device_control - DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
			enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	/*Suspend and disable the channel*/
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}
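
/*
 * Illustrative control sketch (not part of this driver): a client
 * drives the path above through the standard dmaengine wrappers. Note
 * that this driver expects the dma_slave_config to be embedded inside
 * a struct intel_mid_dma_slave, since dma_slave_control() converts the
 * pointer back with to_intel_mid_dma_slave().
 *
 *	struct intel_mid_dma_slave mid_slave;
 *
 *	... fill in mid_slave.hs_mode, .cfg_mode and .dma_slave ...
 *	dmaengine_slave_config(chan, &mid_slave.dma_slave);
 *	...
 *	dmaengine_terminate_all(chan);
 */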

/**
 * intel_mid_dma_prep_memcpy - Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destination address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only
 * The peripheral txn details should be filled in slave structure properly
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum dma_slave_buswidth width;

	pr_debug("MDMA: Prep for memcpy\n");
	BUG_ON(!chan);
	if (!len)
		return NULL;

	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
			mids->cfg_mode, mids->dma_slave.direction,
			mids->hs_mode, mids->dma_slave.src_addr_width);

	/*calculate CFG_LO*/
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/*calculate CFG_HI*/
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/*SW HS only*/
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /*default value*/
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dma_slave.direction == DMA_TO_DEVICE) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /*default value*/
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/*calculate CTL_HI*/
	ctl_hi.ctlx.reser = 0;
	ctl_hi.ctlx.done = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);
	/*calculate CTL_LO*/
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	/*
	 * Here we need some translation from "enum dma_slave_buswidth"
	 * to the format for our dma controller
	 *		standard	intel_mid_dmac's format
	 *		1 Byte		0b000
	 *		2 Bytes		0b001
	 *		4 Bytes		0b010
	 */
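	/* e.g. DMA_SLAVE_BUSWIDTH_4_BYTES = 4, and 4 / 2 = 2 = 0b010 */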
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;

	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
		}
	}

	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

	enable_dma_interrupt(midc);

	desc = midc_desc_get(midc);
	if (desc == NULL)
		goto err_desc_get;
	desc->sar = src;
	desc->dar = dest;
	desc->len = len;
	desc->cfg_hi = cfg_hi.cfg_hi;
	desc->cfg_lo = cfg_lo.cfg_lo;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->ctl_hi = ctl_hi.ctl_hi;
	desc->width = width;
	desc->dirn = mids->dma_slave.direction;
	desc->lli_phys = 0;
	desc->lli = NULL;
	desc->lli_pool = NULL;
	return &desc->txd;

err_desc_get:
	pr_err("ERR_MDMA: Failed to get desc\n");
	midc_desc_put(midc, desc);
	return NULL;
}
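
/*
 * Illustrative note (not part of the code flow): because the prep
 * routine above pulls width, direction and handshaking details from
 * midc->mid_slave, a client must issue DMA_SLAVE_CONFIG (handled by
 * dma_slave_control() above) before calling device_prep_dma_memcpy(),
 * even for memory-to-memory transfers with LNW_DMA_MEM_TO_MEM.
 */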
/**
 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 *
 * Prepares LLI based peripheral transfer
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_data_direction direction,
			unsigned long flags)
{
	struct intel_mid_dma_chan *midc = NULL;
	struct intel_mid_dma_slave *mids = NULL;
	struct intel_mid_dma_desc *desc = NULL;
	struct dma_async_tx_descriptor *txd = NULL;
	union intel_mid_dma_ctl_lo ctl_lo;

	pr_debug("MDMA: Prep for slave SG\n");

	if (!sg_len) {
		pr_err("MDMA: Invalid SG length\n");
		return NULL;
	}
	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	if (!midc->dma->pimr_mask) {
		/* We can still handle sg list with only one item */
		if (sg_len == 1) {
			txd = intel_mid_dma_prep_memcpy(chan,
						mids->dma_slave.dst_addr,
						mids->dma_slave.src_addr,
						sgl->length,
						flags);
			return txd;
		} else {
			pr_warn("MDMA: SG list is not supported by this controller\n");
			return NULL;
		}
	}

	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
			sg_len, direction, flags);

	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
	if (NULL == txd) {
		pr_err("MDMA: Prep memcpy failed\n");
		return NULL;
	}

	desc = to_intel_mid_dma_desc(txd);
	desc->dirn = direction;
	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_lo.ctlx.llp_dst_en = 1;
	ctl_lo.ctlx.llp_src_en = 1;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->lli_length = sg_len;
	desc->current_lli = 0;
	/* DMA coherent memory pool for LLI descriptors*/
	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
					midc->dma->pdev,
					(sizeof(struct intel_mid_dma_lli)*sg_len),
					32, 0);
	if (NULL == desc->lli_pool) {
		pr_err("MID_DMA:LLI pool create failed\n");
		return NULL;
	}

	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
	if (!desc->lli) {
		pr_err("MID_DMA: LLI alloc failed\n");
		pci_pool_destroy(desc->lli_pool);
		return NULL;
	}

	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
	if (flags & DMA_PREP_INTERRUPT) {
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				midc->dma_base + MASK_BLOCK);
		pr_debug("MDMA:Enabled Block interrupt\n");
	}
	return &desc->txd;
}
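
/*
 * Minimal slave-SG sketch (illustrative; assumes an already requested
 * and configured channel; my_buf/my_buf_len are hypothetical):
 *
 *	struct scatterlist sg;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	sg_init_one(&sg, my_buf, my_buf_len);
 *	txd = chan->device->device_prep_slave_sg(chan, &sg, 1,
 *						 DMA_TO_DEVICE,
 *						 DMA_PREP_INTERRUPT);
 */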

/**
 * intel_mid_dma_free_chan_resources - Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;

	if (midc->busy) {
		/* trying to free a channel that is still in use! */
		pr_err("ERR_MDMA: trying to free ch in use\n");
	}
	pm_runtime_put(&mid->pdev->dev);
	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
}

/**
 * intel_mid_dma_alloc_chan_resources - Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Return the descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc;
	dma_addr_t phys;
	int i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(mid->pdev)) {
			pr_err("ERR_MDMA: resume failed\n");
			return -EFAULT;
		}
	}

	/* ASSERT: channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/*ch is not idle*/
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	midc->completed = chan->cookie = 1;

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
			return -ENOMEM;
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = true;
	midc->busy = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}
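
/*
 * Illustrative acquisition sketch (not part of this driver): a client
 * reaches the callback above through the standard dmaengine request
 * path; my_filter()/my_param are hypothetical.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_param);
 */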

/**
 * midc_handle_error - Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptor for error
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet - DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device *mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status, raw_tfr, raw_block;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
	status = raw_tfr | raw_block;
	status &= mid->intr_mask;
	while (status) {
		/*txn interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		midc->raw_tfr = raw_tfr;
		midc->raw_block = raw_block;
		spin_lock_bh(&midc->lock);
		/* clear these interrupts first */
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		if (raw_block) {
			iowrite32((1 << midc->ch_id),
				mid->dma_base + CLEAR_BLOCK);
		}
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		if (raw_block) {
			iowrite32(UNMASK_INTR_REG(midc->ch_id),
					mid->dma_base + MASK_BLOCK);
		}
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/*err interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx error interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting tasklet...\n");
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA: in tasklet1...\n");
	return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA: in tasklet2...\n");
	return dma_tasklet(data);
}

/**
 * intel_mid_dma_interrupt - DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt; if so, schedule the tasklet,
 * otherwise ignore it
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
	u32 tfr_status, err_status;
	int call_tasklet = 0;

	tfr_status = ioread32(mid->dma_base + RAW_TFR);
	err_status = ioread32(mid->dma_base + RAW_ERR);
	if (!tfr_status && !err_status)
		return IRQ_NONE;

	/*DMA Interrupt*/
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
	tfr_status &= mid->intr_mask;
	if (tfr_status) {
		/*need to disable intr*/
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
		call_tasklet = 1;
	}
	err_status &= mid->intr_mask;
	if (err_status) {
		iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
		call_tasklet = 1;
	}
	if (call_tasklet)
		tasklet_schedule(&mid->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

/**
 * mid_setup_dma - Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, its channels and registers, hook up
 * the ISR, and register the device with the DMA engine.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device *dma = pci_get_drvdata(pdev);
	int err, i;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
					LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
			err = -ENOMEM;
			goto err_irq;
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channels for this controller\n", dma->max_chan);
	/*init CH structures*/
	dma->intr_mask = 0;
	dma->state = RUNNING;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		midch->chan.cookie = 1;
		midch->chan.chan_id = i;
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);
		/*mask interrupts*/
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/*init dma structure*/
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;
	dma->common.chancnt = dma->max_chan;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;

	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/*enable dma cntrl*/
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/*register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
			goto err_irq;
	}
	/*register device w/ engine*/
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	pci_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;
}

/**
 * middma_shutdown - Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove.
 * Unregister the DMA controller, clear all structures and free the interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
}

/**
 * intel_mid_dma_probe - PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call mid_setup_dma to complete controller and channel initialization
 */
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}

/**
 * intel_mid_dma_remove - PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call middma_shutdown to complete controller and channel cleanup
 */
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* Power Management */
/**
 * dma_suspend - PCI suspend function
 * @pci: PCI device structure
 * @state: PM message
 *
 * This function is called by OS when a power event occurs
 */
int dma_suspend(struct pci_dev *pci, pm_message_t state)
{
	int i;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_suspend called\n");
	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}
	device->state = SUSPENDED;
	pci_save_state(pci);
	pci_disable_device(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

/**
 * dma_resume - PCI resume function
 * @pci: PCI device structure
 *
 * This function is called by OS when a power event occurs
 */
int dma_resume(struct pci_dev *pci)
{
	int ret;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_resume called\n");
	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
		return ret;
	}
	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = SUSPENDED;
	return 0;
}

static int dma_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}

	return pm_schedule_suspend(dev, 0);
}

/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
};

static struct pci_driver intel_mid_dma_pci_driver = {
	.name		= "Intel MID DMA",
	.id_table	= intel_mid_dma_ids,
	.probe		= intel_mid_dma_probe,
	.remove		= __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
	.suspend	= dma_suspend,
	.resume		= dma_resume,
	.driver = {
		.pm = &intel_mid_dma_pm,
	},
#endif
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci_driver);
}
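
/*
 * Note: registered via fs_initcall() below rather than module_init(),
 * presumably so the DMA controller is up before the client drivers
 * that depend on it probe.
 */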
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);