/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>

#define	CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)					\
	do {								\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_writel(0x%x, 0x%04x)\n",		\
			 __func__, __LINE__, (val), (off));		\
		writel_relaxed((val), (info)->mmio_base + (off));	\
	} while (0)

#define nand_readl(info, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((info)->mmio_base + (off));		\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_readl(0x%04x) = 0x%x\n",		\
			 __func__, __LINE__, (off), _v);		\
		_v;							\
	})

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	*pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	struct scatterlist	sg;
	enum dma_data_direction	dma_dir;
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	int			drcmr_dat;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/*
	 * Variables that are only valid during command
	 * execution. step_chunk_size and step_spare_size are the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transfers to/from the NAND HW");
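
/*
 * Example (illustrative; assumes the driver is built as the pxa3xx_nand
 * module): booting with "pxa3xx_nand.use_dma=0" on the kernel command
 * line forces PIO transfers.
 */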

struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};

struct pxa3xx_nand_flash {
	uint32_t	chip_id;
	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int	dfc_width;	/* Width of flash controller(DWIDTH_C) */
	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};
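/* OOB bytes 0-1 are left out of oobfree: they typically hold the BBM. */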

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};

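/*
 * Timing field helpers: each value is clamped via min() to the width of
 * its bit-field in NDTR0/NDTR1 before being shifted into place.
 */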
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nanoseconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
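/*
 * Worked example (illustrative): with a 156 MHz controller clock,
 * ns2cycle(40, 156000000) = (40 * 156) / 1000 = 6 cycles. Note that the
 * integer division truncates rather than rounding up.
 */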

static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	if (!of_id)
		return PXA3XX_NAND_VARIANT_PXA;
	return (enum pxa3xx_nand_variant)of_id->data;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

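	/* nand_sdr_timings values are in picoseconds; convert them to ns */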
	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
					   unsigned int *flash_width,
					   unsigned int *dfc_width)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int i, id, ntypes;

	ntypes = ARRAY_SIZE(builtin_flash_types);

	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	id = chip->read_byte(mtd);
	id |= chip->read_byte(mtd) << 0x8;

	for (i = 0; i < ntypes; i++) {
		f = &builtin_flash_types[i];

		if (f->chip_id == id)
			break;
	}

	if (i == ntypes) {
		dev_err(&info->pdev->dev, "Error: timings not found\n");
		return -EINVAL;
	}

	pxa3xx_nand_set_timing(host, f->timing);

	*flash_width = f->flash_width;
	*dfc_width = f->dfc_width;

	return 0;
}

static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;

	mode = fls(mode) - 1;
	if (mode < 0)
		mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}

static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned int flash_width = 0, dfc_width = 0;
	int mode, err;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
						      &dfc_width);
		if (err)
			return err;

		if (flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		err = pxa3xx_nand_init_timings_onfi(host, mode);
		if (err)
			return err;
	}

	return 0;
}

/*
 * NOTE: ND_RUN must be set first, and only then may the command buffer
 * be written; the other way around does not work. We enable all the
 * interrupts at the same time and let pxa3xx_nand_irq() handle the rest
 * of the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait for the RUN bit in NDCR to become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}

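/*
 * The NDCR interrupt bits are mask bits: a set bit disables the
 * corresponding interrupt. Hence enable_int() clears bits while
 * disable_int() sets them.
 */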
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read we have
		 * to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time (len is
		 * expressed in 32-bit words), and skip the polling on
		 * the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			data += 32;
			len -= 8;
		}
	}

	ioread32_rep(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (info->step_chunk_size)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += info->step_chunk_size;
	info->oob_buff_pos += info->step_spare_size;
}

static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}

static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}
	info->sg.length = info->chunk_size;
	if (info->use_spare)
		info->sg.length += info->spare_size + info->ecc_size;
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}

static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether to use DMA to transfer the data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and deserves a new
	 * interrupt of its own. This lets the controller exit the IRQ.
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored,
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}


static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}
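/*
 * Illustrative large-page example: column = 0 and page_addr = 0x12345
 * yield ndcb1 = 0x23450000 and ndcb2 = 0x01: the low 16 bits of the page
 * address share NDCB1 with the column, and bits [23:16] go into NDCB2.
 */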

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset the data and oob column pointers to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->step_chunk_size	= 0;
	info->step_spare_size	= 0;
	info->cur_chunk		= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "unsupported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if
	 * so, reset the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait timed out!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if
	 * so, reset the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;

		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait timed out!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0    &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff) the HW will calculate its
		 * ECC as 0, which differs from the ECC information within
		 * the OOB; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has a new command just been sent? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready timed out!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	const struct nand_sdr_timings *timings;

	/* Configure default flash values */
	info->chunk_size = PAGE_CHUNK_SIZE;
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	/* try the common (ONFI mode 0) timings first */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);
	return 0;
}

static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}

static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;

	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
		return 0;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}

static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
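
		/*
		 * Illustrative layout check (assuming a 4096 + 224 byte
		 * page): 4 full chunks of (1024 data + 0 spare + 32 ECC)
		 * plus a last chunk of (0 data + 64 spare + 32 ECC) give
		 * 4 * 1056 + 96 = 4320 = 4096 + 224 bytes.
		 */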
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}

static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
	}

	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}

static int alloc_nand_resource(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq, cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata->num_cs <= 0)
		return -ENODEV;
	info = devm_kzalloc(&pdev->dev,
			    sizeof(*info) + sizeof(*host) * pdata->num_cs,
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->pdev = pdev;
	info->variant = pxa3xx_nand_get_variant(pdev);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		host = (void *)&info[1] + sizeof(*host) * cs;
		chip = &host->chip;
		nand_set_controller_data(chip, host);
		mtd = nand_to_mtd(chip);
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
		mtd->dev.parent = &pdev->dev;
		/* FIXME: all chips use the same device tree partitions */
		nand_set_flash_node(chip, np);

		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
		chip->controller	= &info->controller;
		chip->waitfunc		= pxa3xx_nand_waitfunc;
		chip->select_chip	= pxa3xx_nand_select_chip;
		chip->read_word		= pxa3xx_nand_read_word;
		chip->read_byte		= pxa3xx_nand_read_byte;
		chip->read_buf		= pxa3xx_nand_read_buf;
		chip->write_buf		= pxa3xx_nand_write_buf;
		chip->options		|= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc		= nand_cmdfunc;
	}

	spin_lock_init(&chip->controller->lock);
	init_waitqueue_head(&chip->controller->wq);
	info->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		return PTR_ERR(info->clk);
	}
	ret = clk_prepare_enable(info->clk);
	if (ret < 0)
		return ret;

	if (!np && use_dma) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for data DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
		}
		info->drcmr_dat = r->start;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_disable_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(info->mmio_base)) {
		ret = PTR_ERR(info->mmio_base);
		goto fail_disable_clk;
	}
	info->mmio_phys = r->start;

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
				   pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);

	return 0;

fail_free_buf:
	free_irq(irq, info);
	kfree(info->data_buff);
fail_disable_clk:
	clk_disable_unprepare(info->clk);
	return ret;
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to the SMC upon driver removal. This is
	 * done by setting the x_ARB_CNTL bit, which also prevents the NAND
	 * from having access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(nand_to_mtd(&info->host[cs]->chip));
	return 0;
}

static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

	pdata->ecc_strength = of_get_nand_ecc_strength(np);
	if (pdata->ecc_strength < 0)
		pdata->ecc_strength = 0;

	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
	if (pdata->ecc_step_size < 0)
		pdata->ecc_step_size = 0;

	pdev->dev.platform_data = pdata;

	return 0;
}

static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in the 'mtdparts'
		 * kernel parameter. This name cannot be changed, otherwise
		 * users' mtd partition configurations would break.
		 */
1961		mtd->name = "pxa3xx_nand-0";
1962		info->cs = cs;
1963		ret = pxa3xx_nand_scan(mtd);
1964		if (ret) {
1965			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1966				cs);
1967			continue;
1968		}
1969
1970		ret = mtd_device_register(mtd, pdata->parts[cs],
1971					  pdata->nr_parts[cs]);
1972		if (!ret)
1973			probe_success = 1;
1974	}
1975
1976	if (!probe_success) {
1977		pxa3xx_nand_remove(pdev);
1978		return -ENODEV;
1979	}
1980
1981	return 0;
1982}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

	if (info->state) {
		dev_err(dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	clk_disable(info->clk);
	return 0;
}

static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(info->clk);
	if (ret < 0)
		return ret;

	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value, so that the
	 * driver resets the timing according to the current chip select
	 * at the beginning of cmdfunc.
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, NDSR is updated to 0x1800 when the nand_clk
	 * is disabled/enabled. To prevent this from corrupting the
	 * driver's state machine, clear all status bits before resuming.
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");