/*
 * Driver for Cadence QSPI Controller
 *
 * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>

#define CQSPI_NAME			"cadence-qspi"
#define CQSPI_MAX_CHIPSELECT		16

struct cqspi_st;

struct cqspi_flash_pdata {
	struct spi_nor	nor;
	struct cqspi_st	*cqspi;
	u32		clk_rate;
	u32		read_delay;
	u32		tshsl_ns;
	u32		tsd2d_ns;
	u32		tchsh_ns;
	u32		tslch_ns;
	u8		inst_width;
	u8		addr_width;
	u8		data_width;
	u8		cs;
	bool		registered;
};

struct cqspi_st {
	struct platform_device	*pdev;

	struct clk		*clk;
	unsigned int		sclk;

	void __iomem		*iobase;
	void __iomem		*ahb_base;
	struct completion	transfer_complete;
	struct mutex		bus_mutex;

	int			current_cs;
	int			current_page_size;
	int			current_erase_size;
	int			current_addr_width;
	unsigned long		master_ref_clk_hz;
	bool			is_decoded_cs;
	u32			fifo_depth;
	u32			fifo_width;
	u32			trigger_address;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS			500
#define CQSPI_READ_TIMEOUT_MS			10

/* Instruction type */
#define CQSPI_INST_TYPE_SINGLE			0
#define CQSPI_INST_TYPE_DUAL			1
#define CQSPI_INST_TYPE_QUAD			2

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_BYTES_MAX			4
#define CQSPI_DUMMY_CLKS_MAX			31

#define CQSPI_STIG_DATA_LEN_MAX			8

/* Register map */
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK		BIT(0)
#define CQSPI_REG_CONFIG_DECODE_MASK		BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_DMA_MASK		BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_READCAPTURE			0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB	0
#define CQSPI_REG_READCAPTURE_DELAY_LSB		1
#define CQSPI_REG_READCAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_DMA				0x20
#define CQSPI_REG_DMA_SINGLE_LSB		0
#define CQSPI_REG_DMA_BURST_LSB			8
#define CQSPI_REG_DMA_SINGLE_MASK		0xFF
#define CQSPI_REG_DMA_BURST_MASK		0xFF

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR			BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW			BIT(1)
#define CQSPI_REG_IRQ_IND_COMP			BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT		BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR		BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR		BIT(5)
#define CQSPI_REG_IRQ_WATERMARK			BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL		BIT(12)

#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_IND_SRAM_FULL	| \
					 CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		| \
					 CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK		0x1FFFF

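/*
 * Poll @reg until all bits in @mask are set, or cleared when @clear is
 * true.  Returns 0 on success or -ETIMEDOUT after CQSPI_TIMEOUT_MS.
 */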
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear)
{
	unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	u32 val;

	while (1) {
		val = readl(reg);
		if (clear)
			val = ~val;
		val &= mask;

		if (val == mask)
			return 0;

		if (time_after(jiffies, end))
			return -ETIMEDOUT;
	}
}

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
}

static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	unsigned int irq_status;

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}

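/*
 * Assemble the instruction, address and data transfer-width fields of the
 * read instruction register from the per-flash transfer settings.
 */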
static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	u32 rdreg = 0;

	rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}

static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, in busy mode. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}

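/*
 * Execute a single STIG (software triggered instruction generator)
 * command and wait for both the command and the controller to go idle.
 */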
static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execute */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	/* Polling for completion. */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Polling QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}

static int cqspi_command_read(struct spi_nor *nor,
			      const u8 *txbuf, const unsigned n_tx,
			      u8 *rxbuf, const unsigned n_rx)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int read_len;
	int status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	return 0;
}

static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
			       const u8 *txbuf, const unsigned n_tx)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	unsigned int data;
	int ret;

	if (n_tx > 4 || (n_tx && !txbuf)) {
		dev_err(nor->dev,
			"Invalid input argument, cmdlen %d txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
	if (n_tx) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
		data = 0;
		memcpy(&data, txbuf, n_tx);
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
	}

	ret = cqspi_exec_flash_cmd(cqspi, reg);
	return ret;
}

static int cqspi_command_write_addr(struct spi_nor *nor,
				    const u8 opcode, const unsigned int addr)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
	reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
	reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

	writel(addr, reg_base + CQSPI_REG_CMDADDRESS);

	return cqspi_exec_flash_cmd(cqspi, reg);
}

static int cqspi_indirect_read_setup(struct spi_nor *nor,
				     const unsigned int from_addr)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);

	reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(nor, nor->read_opcode);

	/* Setup dummy clock cycles */
	dummy_clk = nor->read_dummy;
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		dummy_clk = CQSPI_DUMMY_CLKS_MAX;

	if (dummy_clk / 8) {
		reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
		/* Set mode bits high to ensure chip doesn't enter XIP */
		writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);

		/* Need to subtract the mode byte (8 clocks). */
		if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
			dummy_clk -= 8;

		if (dummy_clk)
			reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
			       << CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (nor->addr_width - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

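/*
 * Drain the read SRAM FIFO through the AHB window as the watermark and
 * completion interrupts signal that data is available, until n_rx bytes
 * have been copied into rxbuf.
 */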
static int cqspi_indirect_read_execute(struct spi_nor *nor,
				       u8 *rxbuf, const unsigned n_rx)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int bytes_to_read = 0;
	int ret = 0;

	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		ret = wait_for_completion_timeout(&cqspi->transfer_complete,
						  msecs_to_jiffies
						  (CQSPI_READ_TIMEOUT_MS));

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (!ret && bytes_to_read == 0) {
			dev_err(nor->dev, "Indirect read timeout, no bytes\n");
			ret = -ETIMEDOUT;
			goto failrd;
		}

		while (bytes_to_read != 0) {
			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4));
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
	if (ret) {
		dev_err(nor->dev,
			"Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}

static int cqspi_indirect_write_setup(struct spi_nor *nor,
				      const unsigned int to_addr)
{
	unsigned int reg;
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;

	/* Set opcode. */
	reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
	reg = cqspi_calc_rdreg(nor, nor->program_opcode);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (nor->addr_width - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

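/*
 * Push the payload through the AHB window at most one flash page at a
 * time, waiting for the controller to signal completion of each chunk
 * before queueing the next one.
 */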
static int cqspi_indirect_write_execute(struct spi_nor *nor,
					const u8 *txbuf, const unsigned n_tx)
{
	const unsigned int page_size = nor->page_size;
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);

	while (remaining > 0) {
		write_bytes = remaining > page_size ? page_size : remaining;
		writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4));

		ret = wait_for_completion_timeout(&cqspi->transfer_complete,
						  msecs_to_jiffies
						  (CQSPI_TIMEOUT_MS));
		if (!ret) {
			dev_err(nor->dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		txbuf += write_bytes;
		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
	if (ret) {
		dev_err(nor->dev,
			"Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}

static void cqspi_chipselect(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/* Convert CS if the external decoder is not used.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
	    << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	unsigned int reg;

	/* configure page size and block size. */
	reg = readl(iobase + CQSPI_REG_SIZE);
	reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
	reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
	reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
	reg |= (nor->addr_width - 1);
	writel(reg, iobase + CQSPI_REG_SIZE);

	/* configure the chip select */
	cqspi_chipselect(nor);

	/* Store the new configuration of the controller */
	cqspi->current_page_size = nor->page_size;
	cqspi->current_erase_size = nor->mtd.erasesize;
	cqspi->current_addr_width = nor->addr_width;
}

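/*
 * Convert a delay given in nanoseconds into reference-clock ticks,
 * rounding up so the programmed delay is never shorter than requested.
 */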
static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

	ticks = ref_clk_hz / 1000;	/* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}

static void cqspi_delay(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	       << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}

static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/* Recalculate the baudrate divisor based on QSPI specification. */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const unsigned int bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

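/*
 * Reprogram the chip select, sizes, baud rate divisor and delays only
 * when they differ from the controller's cached state; the controller
 * is disabled while the configuration is switched.
 */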
static void cqspi_configure(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	const unsigned int sclk = f_pdata->clk_rate;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if ((cqspi->current_page_size != nor->page_size) ||
	    (cqspi->current_erase_size != nor->mtd.erasesize) ||
	    (cqspi->current_addr_width != nor->addr_width))
		switch_cs = 1;

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_configure_cs_and_sizes(nor);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(nor);
		cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}

static int cqspi_set_protocol(struct spi_nor *nor, const int read)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;

	f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
	f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
	f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;

	if (read) {
		switch (nor->flash_read) {
		case SPI_NOR_NORMAL:
		case SPI_NOR_FAST:
			f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
			break;
		case SPI_NOR_DUAL:
			f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
			break;
		case SPI_NOR_QUAD:
			f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
			break;
		default:
			return -EINVAL;
		}
	}

	cqspi_configure(nor);

	return 0;
}

static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
			   size_t len, const u_char *buf)
{
	int ret;

	ret = cqspi_set_protocol(nor, 0);
	if (ret)
		return ret;

	ret = cqspi_indirect_write_setup(nor, to);
	if (ret)
		return ret;

	ret = cqspi_indirect_write_execute(nor, buf, len);
	if (ret)
		return ret;

	return (ret < 0) ? ret : len;
}

static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
			  size_t len, u_char *buf)
{
	int ret;

	ret = cqspi_set_protocol(nor, 1);
	if (ret)
		return ret;

	ret = cqspi_indirect_read_setup(nor, from);
	if (ret)
		return ret;

	ret = cqspi_indirect_read_execute(nor, buf, len);
	if (ret)
		return ret;

	return (ret < 0) ? ret : len;
}

static int cqspi_erase(struct spi_nor *nor, loff_t offs)
{
	int ret;

	ret = cqspi_set_protocol(nor, 0);
	if (ret)
		return ret;

	/* Send write enable, then erase commands. */
	ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
	if (ret)
		return ret;

	/* Set up command buffer. */
	ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
	if (ret)
		return ret;

	return 0;
}

static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;

	mutex_lock(&cqspi->bus_mutex);

	return 0;
}

static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;

	mutex_unlock(&cqspi->bus_mutex);
}

static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	int ret;

	ret = cqspi_set_protocol(nor, 0);
	if (!ret)
		ret = cqspi_command_read(nor, &opcode, 1, buf, len);

	return ret;
}

static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	int ret;

	ret = cqspi_set_protocol(nor, 0);
	if (!ret)
		ret = cqspi_command_write(nor, opcode, buf, len);

	return ret;
}

static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}

static int cqspi_of_get_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(&pdev->dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(&pdev->dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	return 0;
}

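/*
 * One-time controller setup: disable remapping and interrupts, split the
 * SRAM evenly between the read and write partitions, and program the
 * indirect trigger address and the read/write watermarks.
 */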
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	cqspi_controller_enable(cqspi, 0);

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1. */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	cqspi_controller_enable(cqspi, 1);
}

static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
	struct platform_device *pdev = cqspi->pdev;
	struct device *dev = &pdev->dev;
	struct cqspi_flash_pdata *f_pdata;
	struct spi_nor *nor;
	struct mtd_info *mtd;
	unsigned int cs;
	int i, ret;

	/* Get flash device data */
	for_each_available_child_of_node(dev->of_node, np) {
		ret = of_property_read_u32(np, "reg", &cs);
		if (ret) {
			dev_err(dev, "Couldn't determine chip select.\n");
			goto err;
		}

		if (cs >= CQSPI_MAX_CHIPSELECT) {
			ret = -EINVAL;
			dev_err(dev, "Chip select %d out of range.\n", cs);
			goto err;
		}

		f_pdata = &cqspi->f_pdata[cs];
		f_pdata->cqspi = cqspi;
		f_pdata->cs = cs;

		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
		if (ret)
			goto err;

		nor = &f_pdata->nor;
		mtd = &nor->mtd;

		mtd->priv = nor;

		nor->dev = dev;
		spi_nor_set_flash_node(nor, np);
		nor->priv = f_pdata;

		nor->read_reg = cqspi_read_reg;
		nor->write_reg = cqspi_write_reg;
		nor->read = cqspi_read;
		nor->write = cqspi_write;
		nor->erase = cqspi_erase;
		nor->prepare = cqspi_prep;
		nor->unprepare = cqspi_unprep;

		mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
					   dev_name(dev), cs);
		if (!mtd->name) {
			ret = -ENOMEM;
			goto err;
		}

		ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
		if (ret)
			goto err;

		ret = mtd_device_register(mtd, NULL, 0);
		if (ret)
			goto err;

		f_pdata->registered = true;
	}

	return 0;

err:
	for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
		if (cqspi->f_pdata[i].registered)
			mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
	return ret;
}

static int cqspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct cqspi_st *cqspi;
	struct resource *res;
	struct resource *res_ahb;
	int ret;
	int irq;

	cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
	if (!cqspi)
		return -ENOMEM;

	mutex_init(&cqspi->bus_mutex);
	cqspi->pdev = pdev;
	platform_set_drvdata(pdev, cqspi);

	/* Obtain configuration from OF. */
	ret = cqspi_of_get_pdata(pdev);
	if (ret) {
		dev_err(dev, "Cannot get mandatory OF data.\n");
		return -ENODEV;
	}

	/* Obtain QSPI clock. */
	cqspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cqspi->clk)) {
		dev_err(dev, "Cannot claim QSPI clock.\n");
		return PTR_ERR(cqspi->clk);
	}

	/* Obtain and remap controller address. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cqspi->iobase = devm_ioremap_resource(dev, res);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		return PTR_ERR(cqspi->iobase);
	}

	/* Obtain and remap AHB address. */
	res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		return PTR_ERR(cqspi->ahb_base);
	}

	init_completion(&cqspi->transfer_complete);

	/* Obtain IRQ line. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Cannot obtain IRQ.\n");
		return -ENXIO;
	}

	ret = clk_prepare_enable(cqspi->clk);
	if (ret) {
		dev_err(dev, "Cannot enable QSPI clock.\n");
		return ret;
	}

	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);

	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
		goto probe_irq_failed;
	}

	cqspi_wait_idle(cqspi);
	cqspi_controller_init(cqspi);
	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	ret = cqspi_setup_flash(cqspi, np);
	if (ret) {
		dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
		goto probe_setup_failed;
	}

	return ret;
probe_setup_failed:
	cqspi_controller_enable(cqspi, 0);
probe_irq_failed:
	clk_disable_unprepare(cqspi->clk);
	return ret;
}

static int cqspi_remove(struct platform_device *pdev)
{
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
		if (cqspi->f_pdata[i].registered)
			mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);

	cqspi_controller_enable(cqspi, 0);

	clk_disable_unprepare(cqspi->clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int cqspi_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 0);
	return 0;
}

static int cqspi_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 1);
	return 0;
}

static const struct dev_pm_ops cqspi__dev_pm_ops = {
	.suspend = cqspi_suspend,
	.resume = cqspi_resume,
};

#define CQSPI_DEV_PM_OPS	(&cqspi__dev_pm_ops)
#else
#define CQSPI_DEV_PM_OPS	NULL
#endif

static struct of_device_id const cqspi_dt_ids[] = {
	{.compatible = "cdns,qspi-nor",},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, cqspi_dt_ids);

static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = CQSPI_DEV_PM_OPS,
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");