/*
 *  linux/drivers/mtd/onenand/omap2.c
 *
 *  OneNAND driver for OMAP2 / OMAP3
 *
 *  Copyright © 2005-2006 Nokia Corporation
 *
 *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 *  IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
#include <asm/gpio.h>

#include <plat/dma.h>

#include <plat/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)

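/* Per-device driver state: the GPMC chip select and mapped OneNAND window,
 * the optional interrupt GPIO and system DMA channel, and the board-supplied
 * GPMC timing setup hook.
 */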
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int *freq_ptr);
	struct regulator *regulator;
};

static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

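/* Wait for the current OneNAND command to finish.  Reset and erase phases
 * are polled with short udelay() loops; other states wait on the interrupt
 * GPIO completion (when configured) or poll the interrupt register, and the
 * ECC and controller status registers are then checked for errors.
 */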
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

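/* OMAP3: copy from the OneNAND BufferRAM into memory with the system DMA
 * channel when the buffer is word-aligned and large enough; otherwise, or on
 * any DMA mapping failure or timeout, fall back to a plain memcpy.
 */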
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

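/* OMAP3: the write direction mirrors the read path above, copying from
 * memory into the BufferRAM via DMA with the same alignment, size and
 * context checks, and a memcpy fallback.
 */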
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

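/* OMAP2: the DMA path below is currently disabled (note the "if (1 || ...)"
 * early-out), so BufferRAM accesses always go through memcpy; the DMA code
 * is kept for when the PM requirements have been sorted out.
 */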
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

static struct platform_driver omap2_onenand_driver;

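/* Re-run the board-supplied GPMC timing setup hook on every bound device;
 * omap2_onenand_rephase() lets platform code re-tune the OneNAND interface
 * timings, e.g. after a frequency change.
 */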
static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, &c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

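/* Regulator hooks installed as onenand.enable/disable so the OneNAND core
 * can power the chip supply up and down around accesses when the board
 * marks the regulator as able to sleep.
 */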
static int omap2_onenand_enable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_enable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't enable regulator\n");

	return ret;
}

static int omap2_onenand_disable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_disable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't disable regulator\n");

	return ret;
}

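/* Probe: request the GPMC chip select, map the OneNAND register/BufferRAM
 * window, optionally set up the interrupt GPIO and a system DMA channel,
 * then scan the chip and register the MTD (with partitions from platform
 * data, if any).
 */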
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	struct onenand_chip *this;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, &c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev,  "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

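	/* Request an OMAP system DMA channel and configure it for packed,
	 * burst-of-8, non-posted transfers; if the request fails, fall
	 * back to PIO for BufferRAM transfers.
	 */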
	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base, c->freq);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	this = &c->onenand;
	if (c->dma_channel >= 0) {
		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if (pdata->regulator_can_sleep) {
		c->regulator = regulator_get(&pdev->dev, "vonenand");
		if (IS_ERR(c->regulator)) {
			dev_err(&pdev->dev,  "Failed to get regulator\n");
			r = PTR_ERR(c->regulator);
			goto err_release_dma;
		}
		c->onenand.enable = omap2_onenand_enable;
		c->onenand.disable = omap2_onenand_disable;
	}

	if (pdata->skip_initial_unlocking)
		this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_regulator;

	r = mtd_device_parse_register(&c->mtd, NULL, NULL,
				      pdata ? pdata->parts : NULL,
				      pdata ? pdata->nr_parts : 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_regulator:
	regulator_put(c->regulator);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}

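/* Remove: release the MTD and free every resource acquired in probe, in
 * reverse order of acquisition.
 */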
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	regulator_put(c->regulator);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	gpmc_cs_free(c->gpmc_cs);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner  = THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");