Linux Audio

Check our new training course

Linux kernel drivers training

Mar 31-Apr 9, 2025, special US time zones
Register
Loading...
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/* ebus.c: EBUS DMA library code.
  3 *
  4 * Copyright (C) 1997  Eddie C. Dost  (ecd@skynet.be)
  5 * Copyright (C) 1999  David S. Miller (davem@redhat.com)
  6 */
  7
  8#include <linux/export.h>
  9#include <linux/kernel.h>
 10#include <linux/types.h>
 11#include <linux/interrupt.h>
 12#include <linux/delay.h>
 13
 14#include <asm/ebus_dma.h>
 15#include <asm/io.h>
 16
 17#define EBDMA_CSR	0x00UL	/* Control/Status */
 18#define EBDMA_ADDR	0x04UL	/* DMA Address */
 19#define EBDMA_COUNT	0x08UL	/* DMA Count */
 20
 21#define EBDMA_CSR_INT_PEND	0x00000001
 22#define EBDMA_CSR_ERR_PEND	0x00000002
 23#define EBDMA_CSR_DRAIN		0x00000004
 24#define EBDMA_CSR_INT_EN	0x00000010
 25#define EBDMA_CSR_RESET		0x00000080
 26#define EBDMA_CSR_WRITE		0x00000100
 27#define EBDMA_CSR_EN_DMA	0x00000200
 28#define EBDMA_CSR_CYC_PEND	0x00000400
 29#define EBDMA_CSR_DIAG_RD_DONE	0x00000800
 30#define EBDMA_CSR_DIAG_WR_DONE	0x00001000
 31#define EBDMA_CSR_EN_CNT	0x00002000
 32#define EBDMA_CSR_TC		0x00004000
 33#define EBDMA_CSR_DIS_CSR_DRN	0x00010000
 34#define EBDMA_CSR_BURST_SZ_MASK	0x000c0000
 35#define EBDMA_CSR_BURST_SZ_1	0x00080000
 36#define EBDMA_CSR_BURST_SZ_4	0x00000000
 37#define EBDMA_CSR_BURST_SZ_8	0x00040000
 38#define EBDMA_CSR_BURST_SZ_16	0x000c0000
 39#define EBDMA_CSR_DIAG_EN	0x00100000
 40#define EBDMA_CSR_DIS_ERR_PEND	0x00400000
 41#define EBDMA_CSR_TCI_DIS	0x00800000
 42#define EBDMA_CSR_EN_NEXT	0x01000000
 43#define EBDMA_CSR_DMA_ON	0x02000000
 44#define EBDMA_CSR_A_LOADED	0x04000000
 45#define EBDMA_CSR_NA_LOADED	0x08000000
 46#define EBDMA_CSR_DEV_ID_MASK	0xf0000000
 47
 48#define EBUS_DMA_RESET_TIMEOUT	10000
 49
 50static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
 51{
 52	int i;
 53	u32 val = 0;
 54
 55	writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
 56	udelay(1);
 57
 58	if (no_drain)
 59		return;
 60
 61	for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
 62		val = readl(p->regs + EBDMA_CSR);
 63
 64		if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
 65			break;
 66		udelay(10);
 67	}
 68}
 69
 70static irqreturn_t ebus_dma_irq(int irq, void *dev_id)
 71{
 72	struct ebus_dma_info *p = dev_id;
 73	unsigned long flags;
 74	u32 csr = 0;
 75
 76	spin_lock_irqsave(&p->lock, flags);
 77	csr = readl(p->regs + EBDMA_CSR);
 78	writel(csr, p->regs + EBDMA_CSR);
 79	spin_unlock_irqrestore(&p->lock, flags);
 80
 81	if (csr & EBDMA_CSR_ERR_PEND) {
 82		printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
 83		p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
 84		return IRQ_HANDLED;
 85	} else if (csr & EBDMA_CSR_INT_PEND) {
 86		p->callback(p,
 87			    (csr & EBDMA_CSR_TC) ?
 88			    EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
 89			    p->client_cookie);
 90		return IRQ_HANDLED;
 91	}
 92
 93	return IRQ_NONE;
 94
 95}
 96
 97int ebus_dma_register(struct ebus_dma_info *p)
 98{
 99	u32 csr;
100
101	if (!p->regs)
102		return -EINVAL;
103	if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
104			 EBUS_DMA_FLAG_TCI_DISABLE))
105		return -EINVAL;
106	if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
107		return -EINVAL;
108	if (!strlen(p->name))
109		return -EINVAL;
110
111	__ebus_dma_reset(p, 1);
112
113	csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;
114
115	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
116		csr |= EBDMA_CSR_TCI_DIS;
117
118	writel(csr, p->regs + EBDMA_CSR);
119
120	return 0;
121}
122EXPORT_SYMBOL(ebus_dma_register);
123
124int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
125{
126	unsigned long flags;
127	u32 csr;
128
129	if (on) {
130		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
131			if (request_irq(p->irq, ebus_dma_irq, IRQF_SHARED, p->name, p))
132				return -EBUSY;
133		}
134
135		spin_lock_irqsave(&p->lock, flags);
136		csr = readl(p->regs + EBDMA_CSR);
137		csr |= EBDMA_CSR_INT_EN;
138		writel(csr, p->regs + EBDMA_CSR);
139		spin_unlock_irqrestore(&p->lock, flags);
140	} else {
141		spin_lock_irqsave(&p->lock, flags);
142		csr = readl(p->regs + EBDMA_CSR);
143		csr &= ~EBDMA_CSR_INT_EN;
144		writel(csr, p->regs + EBDMA_CSR);
145		spin_unlock_irqrestore(&p->lock, flags);
146
147		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
148			free_irq(p->irq, p);
149		}
150	}
151
152	return 0;
153}
154EXPORT_SYMBOL(ebus_dma_irq_enable);
155
156void ebus_dma_unregister(struct ebus_dma_info *p)
157{
158	unsigned long flags;
159	u32 csr;
160	int irq_on = 0;
161
162	spin_lock_irqsave(&p->lock, flags);
163	csr = readl(p->regs + EBDMA_CSR);
164	if (csr & EBDMA_CSR_INT_EN) {
165		csr &= ~EBDMA_CSR_INT_EN;
166		writel(csr, p->regs + EBDMA_CSR);
167		irq_on = 1;
168	}
169	spin_unlock_irqrestore(&p->lock, flags);
170
171	if (irq_on)
172		free_irq(p->irq, p);
173}
174EXPORT_SYMBOL(ebus_dma_unregister);
175
176int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
177{
178	unsigned long flags;
179	u32 csr;
180	int err;
181
182	if (len >= (1 << 24))
183		return -EINVAL;
184
185	spin_lock_irqsave(&p->lock, flags);
186	csr = readl(p->regs + EBDMA_CSR);
187	err = -EINVAL;
188	if (!(csr & EBDMA_CSR_EN_DMA))
189		goto out;
190	err = -EBUSY;
191	if (csr & EBDMA_CSR_NA_LOADED)
192		goto out;
193
194	writel(len,      p->regs + EBDMA_COUNT);
195	writel(bus_addr, p->regs + EBDMA_ADDR);
196	err = 0;
197
198out:
199	spin_unlock_irqrestore(&p->lock, flags);
200
201	return err;
202}
203EXPORT_SYMBOL(ebus_dma_request);
204
205void ebus_dma_prepare(struct ebus_dma_info *p, int write)
206{
207	unsigned long flags;
208	u32 csr;
209
210	spin_lock_irqsave(&p->lock, flags);
211	__ebus_dma_reset(p, 0);
212
213	csr = (EBDMA_CSR_INT_EN |
214	       EBDMA_CSR_EN_CNT |
215	       EBDMA_CSR_BURST_SZ_16 |
216	       EBDMA_CSR_EN_NEXT);
217
218	if (write)
219		csr |= EBDMA_CSR_WRITE;
220	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
221		csr |= EBDMA_CSR_TCI_DIS;
222
223	writel(csr, p->regs + EBDMA_CSR);
224
225	spin_unlock_irqrestore(&p->lock, flags);
226}
227EXPORT_SYMBOL(ebus_dma_prepare);
228
/* Return the current value of the DMA COUNT register, i.e. the number
 * of bytes still outstanding in the active transfer.
 */
unsigned int ebus_dma_residue(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_COUNT);
}
EXPORT_SYMBOL(ebus_dma_residue);
234
/* Return the current value of the DMA ADDR register (the bus address
 * the engine is working at).
 */
unsigned int ebus_dma_addr(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_ADDR);
}
EXPORT_SYMBOL(ebus_dma_addr);
240
241void ebus_dma_enable(struct ebus_dma_info *p, int on)
242{
243	unsigned long flags;
244	u32 orig_csr, csr;
245
246	spin_lock_irqsave(&p->lock, flags);
247	orig_csr = csr = readl(p->regs + EBDMA_CSR);
248	if (on)
249		csr |= EBDMA_CSR_EN_DMA;
250	else
251		csr &= ~EBDMA_CSR_EN_DMA;
252	if ((orig_csr & EBDMA_CSR_EN_DMA) !=
253	    (csr & EBDMA_CSR_EN_DMA))
254		writel(csr, p->regs + EBDMA_CSR);
255	spin_unlock_irqrestore(&p->lock, flags);
256}
257EXPORT_SYMBOL(ebus_dma_enable);
v4.6
 
  1/* ebus.c: EBUS DMA library code.
  2 *
  3 * Copyright (C) 1997  Eddie C. Dost  (ecd@skynet.be)
  4 * Copyright (C) 1999  David S. Miller (davem@redhat.com)
  5 */
  6
  7#include <linux/export.h>
  8#include <linux/kernel.h>
  9#include <linux/types.h>
 10#include <linux/interrupt.h>
 11#include <linux/delay.h>
 12
 13#include <asm/ebus_dma.h>
 14#include <asm/io.h>
 15
 16#define EBDMA_CSR	0x00UL	/* Control/Status */
 17#define EBDMA_ADDR	0x04UL	/* DMA Address */
 18#define EBDMA_COUNT	0x08UL	/* DMA Count */
 19
 20#define EBDMA_CSR_INT_PEND	0x00000001
 21#define EBDMA_CSR_ERR_PEND	0x00000002
 22#define EBDMA_CSR_DRAIN		0x00000004
 23#define EBDMA_CSR_INT_EN	0x00000010
 24#define EBDMA_CSR_RESET		0x00000080
 25#define EBDMA_CSR_WRITE		0x00000100
 26#define EBDMA_CSR_EN_DMA	0x00000200
 27#define EBDMA_CSR_CYC_PEND	0x00000400
 28#define EBDMA_CSR_DIAG_RD_DONE	0x00000800
 29#define EBDMA_CSR_DIAG_WR_DONE	0x00001000
 30#define EBDMA_CSR_EN_CNT	0x00002000
 31#define EBDMA_CSR_TC		0x00004000
 32#define EBDMA_CSR_DIS_CSR_DRN	0x00010000
 33#define EBDMA_CSR_BURST_SZ_MASK	0x000c0000
 34#define EBDMA_CSR_BURST_SZ_1	0x00080000
 35#define EBDMA_CSR_BURST_SZ_4	0x00000000
 36#define EBDMA_CSR_BURST_SZ_8	0x00040000
 37#define EBDMA_CSR_BURST_SZ_16	0x000c0000
 38#define EBDMA_CSR_DIAG_EN	0x00100000
 39#define EBDMA_CSR_DIS_ERR_PEND	0x00400000
 40#define EBDMA_CSR_TCI_DIS	0x00800000
 41#define EBDMA_CSR_EN_NEXT	0x01000000
 42#define EBDMA_CSR_DMA_ON	0x02000000
 43#define EBDMA_CSR_A_LOADED	0x04000000
 44#define EBDMA_CSR_NA_LOADED	0x08000000
 45#define EBDMA_CSR_DEV_ID_MASK	0xf0000000
 46
 47#define EBUS_DMA_RESET_TIMEOUT	10000
 48
/* Reset the EBUS DMA engine behind @p.  When @no_drain is zero, poll
 * CSR until the drain and cycle-pending bits clear, bounded by
 * EBUS_DMA_RESET_TIMEOUT iterations of 10us each.
 */
static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
{
	int i;
	u32 val = 0;

	writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
	udelay(1);

	if (no_drain)
		return;

	for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
		val = readl(p->regs + EBDMA_CSR);

		if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
			break;
		udelay(10);
	}
}
 68
/* Interrupt handler installed by ebus_dma_irq_enable() when the client
 * set EBUS_DMA_FLAG_USE_EBDMA_HANDLER.  Reads CSR under the lock and
 * writes the value back (this appears to acknowledge the pending
 * status bits), then dispatches the client callback outside the lock.
 */
static irqreturn_t ebus_dma_irq(int irq, void *dev_id)
{
	struct ebus_dma_info *p = dev_id;
	unsigned long flags;
	u32 csr = 0;

	spin_lock_irqsave(&p->lock, flags);
	csr = readl(p->regs + EBDMA_CSR);
	writel(csr, p->regs + EBDMA_CSR);
	spin_unlock_irqrestore(&p->lock, flags);

	if (csr & EBDMA_CSR_ERR_PEND) {
		printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
		p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
		return IRQ_HANDLED;
	} else if (csr & EBDMA_CSR_INT_PEND) {
		/* TC set: transfer-count completion; clear: the device
		 * itself raised the interrupt.
		 */
		p->callback(p,
			    (csr & EBDMA_CSR_TC) ?
			    EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
			    p->client_cookie);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;

}
 95
/* Validate the client-filled descriptor @p and program the initial CSR
 * configuration (16-byte bursts, byte-count mode, optional TC-interrupt
 * disable).  Returns 0 on success or -EINVAL if the descriptor is
 * malformed.
 */
int ebus_dma_register(struct ebus_dma_info *p)
{
	u32 csr;

	if (!p->regs)
		return -EINVAL;
	if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
			 EBUS_DMA_FLAG_TCI_DISABLE))
		return -EINVAL;
	if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
		return -EINVAL;
	if (!strlen(p->name))
		return -EINVAL;

	/* Reset without waiting for a drain; nothing is in flight yet. */
	__ebus_dma_reset(p, 1);

	csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;

	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
		csr |= EBDMA_CSR_TCI_DIS;

	writel(csr, p->regs + EBDMA_CSR);

	return 0;
}
EXPORT_SYMBOL(ebus_dma_register);
122
/* Enable (@on != 0) or disable the DMA interrupt.  If the client asked
 * this library to own the interrupt line, the IRQ itself is requested
 * before enabling and freed after disabling.  Returns 0, or -EBUSY if
 * the IRQ could not be obtained.
 */
int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
{
	unsigned long flags;
	u32 csr;

	if (on) {
		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
			if (request_irq(p->irq, ebus_dma_irq, IRQF_SHARED, p->name, p))
				return -EBUSY;
		}

		spin_lock_irqsave(&p->lock, flags);
		csr = readl(p->regs + EBDMA_CSR);
		csr |= EBDMA_CSR_INT_EN;
		writel(csr, p->regs + EBDMA_CSR);
		spin_unlock_irqrestore(&p->lock, flags);
	} else {
		spin_lock_irqsave(&p->lock, flags);
		csr = readl(p->regs + EBDMA_CSR);
		csr &= ~EBDMA_CSR_INT_EN;
		writel(csr, p->regs + EBDMA_CSR);
		spin_unlock_irqrestore(&p->lock, flags);

		/* Only free the IRQ we requested above. */
		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
			free_irq(p->irq, p);
		}
	}

	return 0;
}
EXPORT_SYMBOL(ebus_dma_irq_enable);
154
/* Tear-down counterpart of ebus_dma_register(): clear the interrupt
 * enable bit if it is still set and, in that case, release the IRQ
 * line.
 */
void ebus_dma_unregister(struct ebus_dma_info *p)
{
	unsigned long flags;
	u32 csr;
	int irq_on = 0;

	spin_lock_irqsave(&p->lock, flags);
	csr = readl(p->regs + EBDMA_CSR);
	if (csr & EBDMA_CSR_INT_EN) {
		csr &= ~EBDMA_CSR_INT_EN;
		writel(csr, p->regs + EBDMA_CSR);
		irq_on = 1;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	/* free_irq() can sleep, so it happens outside the spinlock. */
	if (irq_on)
		free_irq(p->irq, p);
}
EXPORT_SYMBOL(ebus_dma_unregister);
174
/* Queue one transfer of @len bytes at bus address @bus_addr.  The
 * engine must already be enabled and must not have a next address
 * pending.  Returns 0 on success, -EINVAL for a bad length or a
 * disabled engine, -EBUSY if a next transfer is already loaded.
 */
int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
{
	unsigned long flags;
	u32 csr;
	int err;

	/* The hardware COUNT register is only 24 bits wide. */
	if (len >= (1 << 24))
		return -EINVAL;

	spin_lock_irqsave(&p->lock, flags);
	csr = readl(p->regs + EBDMA_CSR);
	err = -EINVAL;
	if (!(csr & EBDMA_CSR_EN_DMA))
		goto out;
	err = -EBUSY;
	if (csr & EBDMA_CSR_NA_LOADED)
		goto out;

	writel(len,      p->regs + EBDMA_COUNT);
	writel(bus_addr, p->regs + EBDMA_ADDR);
	err = 0;

out:
	spin_unlock_irqrestore(&p->lock, flags);

	return err;
}
EXPORT_SYMBOL(ebus_dma_request);
203
/* Reset the engine (draining outstanding cycles) and program it for a
 * new chained transfer: interrupts on, count mode, 16-byte bursts,
 * next-address chaining, direction per @write.
 */
void ebus_dma_prepare(struct ebus_dma_info *p, int write)
{
	unsigned long flags;
	u32 csr;

	spin_lock_irqsave(&p->lock, flags);
	__ebus_dma_reset(p, 0);

	csr = (EBDMA_CSR_INT_EN |
	       EBDMA_CSR_EN_CNT |
	       EBDMA_CSR_BURST_SZ_16 |
	       EBDMA_CSR_EN_NEXT);

	if (write)
		csr |= EBDMA_CSR_WRITE;
	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
		csr |= EBDMA_CSR_TCI_DIS;

	writel(csr, p->regs + EBDMA_CSR);

	spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(ebus_dma_prepare);
227
/* Return the current value of the DMA COUNT register, i.e. the number
 * of bytes still outstanding in the active transfer.
 */
unsigned int ebus_dma_residue(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_COUNT);
}
EXPORT_SYMBOL(ebus_dma_residue);
233
/* Return the current value of the DMA ADDR register (the bus address
 * the engine is working at).
 */
unsigned int ebus_dma_addr(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_ADDR);
}
EXPORT_SYMBOL(ebus_dma_addr);
239
/* Set or clear the EN_DMA bit.  CSR is only rewritten when the bit
 * actually changes, avoiding a redundant MMIO write.
 */
void ebus_dma_enable(struct ebus_dma_info *p, int on)
{
	unsigned long flags;
	u32 orig_csr, csr;

	spin_lock_irqsave(&p->lock, flags);
	orig_csr = csr = readl(p->regs + EBDMA_CSR);
	if (on)
		csr |= EBDMA_CSR_EN_DMA;
	else
		csr &= ~EBDMA_CSR_EN_DMA;
	if ((orig_csr & EBDMA_CSR_EN_DMA) !=
	    (csr & EBDMA_CSR_EN_DMA))
		writel(csr, p->regs + EBDMA_CSR);
	spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(ebus_dma_enable);