v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic PXA PATA driver
 *
 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <scsi/scsi_host.h>

#include <linux/platform_data/ata-pxa.h>

#define DRV_NAME	"pata_pxa"
#define DRV_VERSION	"0.1"

struct pata_pxa_data {
	/* DMA engine channel used for the data transfer */
	struct dma_chan		*dma_chan;
	/* Cookie of the last submitted DMA transaction */
	dma_cookie_t		dma_cookie;
	/* Completed from the DMA callback once the transfer is done */
	struct completion	dma_done;
};

/*
 * DMA interrupt handler.
 */
static void pxa_ata_dma_irq(void *d)
{
	struct pata_pxa_data *pd = d;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status == DMA_ERROR || status == DMA_COMPLETE)
		complete(&pd->dma_done);
}

/*
 * Prepare taskfile for submission.
 */
static void pxa_qc_prep(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	struct dma_async_tx_descriptor *tx;
	enum dma_transfer_direction dir;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa_ata_dma_irq;
	tx->callback_param = pd;
	pd->dma_cookie = dmaengine_submit(tx);
}
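
/*
 * For reference, the dmaengine slave sequence this driver follows is,
 * roughly (a summary of the code in this file, not an authoritative
 * description of the dmaengine API):
 *
 *	tx = dmaengine_prep_slave_sg(chan, sg, n_elem, dir, flags);
 *	tx->callback = ...;                      set the completion callback
 *	cookie = dmaengine_submit(tx);           queue it      (->qc_prep)
 *	dma_async_issue_pending(chan);           kick it off   (->bmdma_start)
 *	dmaengine_tx_status(chan, cookie, ...);  poll progress (->bmdma_stop)
 */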

/*
 * Configure the DMA controller, load the DMA descriptors, but don't start
 * the DMA controller yet. Only issue the ATA command.
 */
static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}

/*
 * Execute the DMA transfer.
 */
static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;

	init_completion(&pd->dma_done);
	dma_async_issue_pending(pd->dma_chan);
}

/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	/* wait_for_completion_timeout() returns 0 on timeout */
	if (status != DMA_ERROR && status != DMA_COMPLETE &&
	    !wait_for_completion_timeout(&pd->dma_done, HZ))
		ata_dev_err(qc->dev, "Timeout waiting for DMA completion!");

	dmaengine_terminate_all(pd->dma_chan);
}

/*
 * Read DMA status. bmdma_stop() takes care of properly finishing the DMA
 * transfer, so a DMA-complete interrupt is always reported here.
 */
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
	struct pata_pxa_data *pd = ap->private_data;
	unsigned char ret = ATA_DMA_INTR;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
	if (status != DMA_COMPLETE)
		ret |= ATA_DMA_ERR;

	return ret;
}

/*
 * No IRQ register present, so we do nothing.
 */
static void pxa_irq_clear(struct ata_port *ap)
{
}

/*
 * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
 * unclear why ATAPI has DMA issues.
 */
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return -EOPNOTSUPP;
}

static struct scsi_host_template pxa_ata_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations pxa_ata_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= ata_cable_40wire,

	.bmdma_setup		= pxa_bmdma_setup,
	.bmdma_start		= pxa_bmdma_start,
	.bmdma_stop		= pxa_bmdma_stop,
	.bmdma_status		= pxa_bmdma_status,

	.check_atapi_dma	= pxa_check_atapi_dma,

	.sff_irq_clear		= pxa_irq_clear,

	.qc_prep		= pxa_qc_prep,
};
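
/*
 * Rough order in which libata's BMDMA core invokes these hooks for a DMA
 * command (a sketch based on the generic SFF/BMDMA flow; details vary by
 * kernel version):
 *
 *	->qc_prep()       build the DMA descriptors
 *	->bmdma_setup()   issue the ATA command
 *	->bmdma_start()   start the transfer
 *	(device raises the ATA interrupt)
 *	->bmdma_status()  check for errors
 *	->bmdma_stop()    wait for and shut down the DMA engine
 */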

static int pxa_ata_probe(struct platform_device *pdev)
{
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_pxa_data *data;
	struct resource *cmd_res;
	struct resource *ctl_res;
	struct resource *dma_res;
	struct resource *irq_res;
	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct dma_slave_config	config;
	int ret = 0;

	/*
	 * Resource validation; four resources are needed:
	 *  - CMD port base address
	 *  - CTL port base address
	 *  - DMA port base address
	 *  - IRQ pin
	 */
	if (pdev->num_resources != 4) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * CMD port base address
	 */
	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(cmd_res == NULL))
		return -EINVAL;

	/*
	 * CTL port base address
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (unlikely(ctl_res == NULL))
		return -EINVAL;

	/*
	 * DMA port base address
	 */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (unlikely(dma_res == NULL))
		return -EINVAL;

	/*
	 * IRQ pin
	 */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(irq_res == NULL))
		return -EINVAL;

	/*
	 * Allocate the host
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	ap		= host->ports[0];
	ap->ops		= &pxa_ata_port_ops;
	ap->pio_mask	= ATA_PIO4;
	ap->mwdma_mask	= ATA_MWDMA2;

	ap->ioaddr.cmd_addr	= devm_ioremap(&pdev->dev, cmd_res->start,
						resource_size(cmd_res));
	ap->ioaddr.ctl_addr	= devm_ioremap(&pdev->dev, ctl_res->start,
						resource_size(ctl_res));
	ap->ioaddr.bmdma_addr	= devm_ioremap(&pdev->dev, dma_res->start,
						resource_size(dma_res));

	/*
	 * Adjust register offsets
	 */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
	ap->ioaddr.data_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DATA << pdata->reg_shift);
	ap->ioaddr.error_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_ERR << pdata->reg_shift);
	ap->ioaddr.feature_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_FEATURE << pdata->reg_shift);
	ap->ioaddr.nsect_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_NSECT << pdata->reg_shift);
	ap->ioaddr.lbal_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAL << pdata->reg_shift);
	ap->ioaddr.lbam_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAM << pdata->reg_shift);
	ap->ioaddr.lbah_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAH << pdata->reg_shift);
	ap->ioaddr.device_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DEVICE << pdata->reg_shift);
	ap->ioaddr.status_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_STATUS << pdata->reg_shift);
	ap->ioaddr.command_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_CMD << pdata->reg_shift);

	/*
	 * Allocate and load driver's internal data structure
	 */
	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
								GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ap->private_data = data;

	/* Both directions target the same 16-bit ATA data register */
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = dma_res->start;
	config.dst_addr = dma_res->start;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	/*
	 * Request the DMA channel
	 */
	data->dma_chan =
		dma_request_slave_channel(&pdev->dev, "data");
	if (!data->dma_chan)
		return -EBUSY;
	ret = dmaengine_slave_config(data->dma_chan, &config);
	if (ret < 0) {
		dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
		return ret;
	}

	/*
	 * Activate the ATA host
	 */
	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
				pdata->irq_flags, &pxa_ata_sht);
	if (ret)
		dma_release_channel(data->dma_chan);

	return ret;
}
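
/*
 * For illustration only: a board file might register this device roughly as
 * below. The base addresses, IRQ number and reg_shift are hypothetical, not
 * taken from any real board support code.
 */
#if 0	/* example, not built */
static struct resource example_pata_resources[] = {
	[0] = { .start = 0x20000000, .end = 0x2000003f,
		.flags = IORESOURCE_MEM },	/* CMD registers */
	[1] = { .start = 0x20000040, .end = 0x2000004f,
		.flags = IORESOURCE_MEM },	/* CTL register */
	[2] = { .start = 0x20000000, .end = 0x20000003,
		.flags = IORESOURCE_DMA },	/* DMA data FIFO */
	[3] = { .start = 40, .end = 40,
		.flags = IORESOURCE_IRQ },	/* IDE interrupt */
};

static struct pata_pxa_pdata example_pata_pdata = {
	.reg_shift	= 2,	/* registers on 32-bit boundaries */
	.irq_flags	= IRQF_TRIGGER_RISING,
};

static struct platform_device example_pata_device = {
	.name		= DRV_NAME,
	.id		= -1,
	.resource	= example_pata_resources,
	.num_resources	= ARRAY_SIZE(example_pata_resources),
	.dev		= {
		.platform_data	= &example_pata_pdata,
	},
};
#endif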

static int pxa_ata_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct pata_pxa_data *data = host->ports[0]->private_data;

	dma_release_channel(data->dma_chan);

	ata_host_detach(host);

	return 0;
}

static struct platform_driver pxa_ata_driver = {
	.probe		= pxa_ata_probe,
	.remove		= pxa_ata_remove,
	.driver		= {
		.name		= DRV_NAME,
	},
};

module_platform_driver(pxa_ata_driver);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
v3.1
 
/*
 * Generic PXA PATA driver
 *
 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <scsi/scsi_host.h>

#include <mach/pxa2xx-regs.h>
#include <mach/pata_pxa.h>
#include <mach/dma.h>

#define DRV_NAME	"pata_pxa"
#define DRV_VERSION	"0.1"

struct pata_pxa_data {
	/* Signed so that errors from pxa_request_dma() can be detected */
	int			dma_channel;
	struct pxa_dma_desc	*dma_desc;
	dma_addr_t		dma_desc_addr;
	uint32_t		dma_desc_id;

	/* DMA IO physical address */
	uint32_t		dma_io_addr;
	/* PXA DREQ<0:2> pin selector */
	uint32_t		dma_dreq;
	/* DMA DCSR register value */
	uint32_t		dma_dcsr;

	struct completion	dma_done;
};

/*
 * Set up the DMA descriptors. Each descriptor covers at most 4k; longer
 * transfers are split into multiple chained descriptors.
 */
static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;

	uint32_t cpu_len, seg_len;
	dma_addr_t cpu_addr;

	cpu_addr = sg_dma_address(sg);
	cpu_len = sg_dma_len(sg);

	do {
		seg_len = (cpu_len > 0x1000) ? 0x1000 : cpu_len;

		pd->dma_desc[pd->dma_desc_id].ddadr = pd->dma_desc_addr +
			((pd->dma_desc_id + 1) * sizeof(struct pxa_dma_desc));

		pd->dma_desc[pd->dma_desc_id].dcmd = DCMD_BURST32 |
					DCMD_WIDTH2 | (DCMD_LENGTH & seg_len);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			pd->dma_desc[pd->dma_desc_id].dsadr = cpu_addr;
			pd->dma_desc[pd->dma_desc_id].dtadr = pd->dma_io_addr;
			pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCSRCADDR |
						DCMD_FLOWTRG;
		} else {
			pd->dma_desc[pd->dma_desc_id].dsadr = pd->dma_io_addr;
			pd->dma_desc[pd->dma_desc_id].dtadr = cpu_addr;
			pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCTRGADDR |
						DCMD_FLOWSRC;
		}

		cpu_len -= seg_len;
		cpu_addr += seg_len;
		pd->dma_desc_id++;

	} while (cpu_len);

	/* Should not happen -- flag unaligned transfers in DALGN */
	if (seg_len & 0x1f)
		DALGN |= (1 << pd->dma_dreq);
}
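
/*
 * Worked example of the splitting above (illustrative numbers): a single
 * 10000-byte scatterlist entry becomes three chained descriptors of
 * 4096 + 4096 + 1808 bytes, with only the last one raising an IRQ once
 * pxa_qc_prep() has marked it with DCMD_ENDIRQEN.
 */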

/*
 * Prepare taskfile for submission.
 */
static void pxa_qc_prep(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	int si = 0;
	struct scatterlist *sg;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	pd->dma_desc_id = 0;

	DCSR(pd->dma_channel) = 0;
	DALGN &= ~(1 << pd->dma_dreq);

	for_each_sg(qc->sg, sg, qc->n_elem, si)
		pxa_load_dmac(sg, qc);

	pd->dma_desc[pd->dma_desc_id - 1].ddadr = DDADR_STOP;

	/* Fire IRQ only at the end of the last block */
	pd->dma_desc[pd->dma_desc_id - 1].dcmd |= DCMD_ENDIRQEN;

	DDADR(pd->dma_channel) = pd->dma_desc_addr;
	DRCMR(pd->dma_dreq) = DRCMR_MAPVLD | pd->dma_channel;
}

/*
 * Configure the DMA controller, load the DMA descriptors, but don't start
 * the DMA controller yet. Only issue the ATA command.
 */
static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}

/*
 * Execute the DMA transfer.
 */
static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;

	init_completion(&pd->dma_done);
	DCSR(pd->dma_channel) = DCSR_RUN;
}

/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;

	/* wait_for_completion_timeout() returns 0 on timeout */
	if ((DCSR(pd->dma_channel) & DCSR_RUN) &&
		!wait_for_completion_timeout(&pd->dma_done, HZ))
		dev_err(qc->ap->dev, "Timeout waiting for DMA completion!");

	DCSR(pd->dma_channel) = 0;
}

/*
 * Read DMA status. bmdma_stop() takes care of properly finishing the DMA
 * transfer, so a DMA-complete interrupt is always reported here.
 */
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
	struct pata_pxa_data *pd = ap->private_data;
	unsigned char ret = ATA_DMA_INTR;

	if (pd->dma_dcsr & DCSR_BUSERR)
		ret |= ATA_DMA_ERR;

	return ret;
}

/*
 * No IRQ register present, so we do nothing.
 */
static void pxa_irq_clear(struct ata_port *ap)
{
}

/*
 * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
 * unclear why ATAPI has DMA issues.
 */
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return -EOPNOTSUPP;
}

static struct scsi_host_template pxa_ata_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations pxa_ata_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= ata_cable_40wire,

	.bmdma_setup		= pxa_bmdma_setup,
	.bmdma_start		= pxa_bmdma_start,
	.bmdma_stop		= pxa_bmdma_stop,
	.bmdma_status		= pxa_bmdma_status,

	.check_atapi_dma	= pxa_check_atapi_dma,

	.sff_irq_clear		= pxa_irq_clear,

	.qc_prep		= pxa_qc_prep,
};

/*
 * DMA interrupt handler.
 */
static void pxa_ata_dma_irq(int dma, void *port)
{
	struct ata_port *ap = port;
	struct pata_pxa_data *pd = ap->private_data;

	/* Latch the status, then write it back to clear the interrupt bits */
	pd->dma_dcsr = DCSR(dma);
	DCSR(dma) = pd->dma_dcsr;

	if (pd->dma_dcsr & DCSR_STOPSTATE)
		complete(&pd->dma_done);
}

static int __devinit pxa_ata_probe(struct platform_device *pdev)
{
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_pxa_data *data;
	struct resource *cmd_res;
	struct resource *ctl_res;
	struct resource *dma_res;
	struct resource *irq_res;
	struct pata_pxa_pdata *pdata = pdev->dev.platform_data;
	int ret = 0;

	/*
	 * Resource validation; four resources are needed:
	 *  - CMD port base address
	 *  - CTL port base address
	 *  - DMA port base address
	 *  - IRQ pin
	 */
	if (pdev->num_resources != 4) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * CMD port base address
	 */
	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(cmd_res == NULL))
		return -EINVAL;

	/*
	 * CTL port base address
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (unlikely(ctl_res == NULL))
		return -EINVAL;

	/*
	 * DMA port base address
	 */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (unlikely(dma_res == NULL))
		return -EINVAL;

	/*
	 * IRQ pin
	 */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(irq_res == NULL))
		return -EINVAL;

	/*
	 * Allocate the host
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	ap		= host->ports[0];
	ap->ops		= &pxa_ata_port_ops;
	ap->pio_mask	= ATA_PIO4;
	ap->mwdma_mask	= ATA_MWDMA2;

	ap->ioaddr.cmd_addr	= devm_ioremap(&pdev->dev, cmd_res->start,
						resource_size(cmd_res));
	ap->ioaddr.ctl_addr	= devm_ioremap(&pdev->dev, ctl_res->start,
						resource_size(ctl_res));
	ap->ioaddr.bmdma_addr	= devm_ioremap(&pdev->dev, dma_res->start,
						resource_size(dma_res));

	/*
	 * Adjust register offsets
	 */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
	ap->ioaddr.data_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DATA << pdata->reg_shift);
	ap->ioaddr.error_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_ERR << pdata->reg_shift);
	ap->ioaddr.feature_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_FEATURE << pdata->reg_shift);
	ap->ioaddr.nsect_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_NSECT << pdata->reg_shift);
	ap->ioaddr.lbal_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAL << pdata->reg_shift);
	ap->ioaddr.lbam_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAM << pdata->reg_shift);
	ap->ioaddr.lbah_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAH << pdata->reg_shift);
	ap->ioaddr.device_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DEVICE << pdata->reg_shift);
	ap->ioaddr.status_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_STATUS << pdata->reg_shift);
	ap->ioaddr.command_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_CMD << pdata->reg_shift);

	/*
	 * Allocate and load driver's internal data structure
	 */
	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
								GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ap->private_data = data;
	data->dma_dreq = pdata->dma_dreq;
	data->dma_io_addr = dma_res->start;

	/*
	 * Allocate space for the DMA descriptors
	 */
	data->dma_desc = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
					&data->dma_desc_addr, GFP_KERNEL);
	if (!data->dma_desc)
		return -EINVAL;

	/*
	 * Request the DMA channel
	 */
	data->dma_channel = pxa_request_dma(DRV_NAME, DMA_PRIO_LOW,
						pxa_ata_dma_irq, ap);
	if (data->dma_channel < 0)
		return -EBUSY;

	/*
	 * Stop and clear the DMA channel
	 */
	DCSR(data->dma_channel) = 0;

	/*
	 * Activate the ATA host
	 */
	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
				pdata->irq_flags, &pxa_ata_sht);
	if (ret)
		pxa_free_dma(data->dma_channel);

	return ret;
}

static int __devexit pxa_ata_remove(struct platform_device *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct pata_pxa_data *data = host->ports[0]->private_data;

	pxa_free_dma(data->dma_channel);

	ata_host_detach(host);

	return 0;
}

static struct platform_driver pxa_ata_driver = {
	.probe		= pxa_ata_probe,
	.remove		= __devexit_p(pxa_ata_remove),
	.driver		= {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
	},
};

static int __init pxa_ata_init(void)
{
	return platform_driver_register(&pxa_ata_driver);
}

static void __exit pxa_ata_exit(void)
{
	platform_driver_unregister(&pxa_ata_driver);
}

module_init(pxa_ata_init);
module_exit(pxa_ata_exit);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);