v6.2
   1// SPDX-License-Identifier: GPL-2.0+
   2
   3/*
   4 * Freescale QuadSPI driver.
   5 *
   6 * Copyright (C) 2013 Freescale Semiconductor, Inc.
   7 * Copyright (C) 2018 Bootlin
   8 * Copyright (C) 2018 exceet electronics GmbH
   9 * Copyright (C) 2018 Kontron Electronics GmbH
  10 *
  11 * Transition to SPI MEM interface:
  12 * Authors:
  13 *     Boris Brezillon <bbrezillon@kernel.org>
  14 *     Frieder Schrempf <frieder.schrempf@kontron.de>
  15 *     Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
  16 *     Suresh Gupta <suresh.gupta@nxp.com>
  17 *
  18 * Based on the original fsl-quadspi.c SPI NOR driver:
  19 * Author: Freescale Semiconductor, Inc.
  20 *
  21 */
  22
  23#include <linux/bitops.h>
  24#include <linux/clk.h>
  25#include <linux/completion.h>
  26#include <linux/delay.h>
  27#include <linux/err.h>
  28#include <linux/errno.h>
  29#include <linux/interrupt.h>
  30#include <linux/io.h>
  31#include <linux/iopoll.h>
  32#include <linux/jiffies.h>
  33#include <linux/kernel.h>
  34#include <linux/module.h>
  35#include <linux/mutex.h>
  36#include <linux/of.h>
  37#include <linux/of_device.h>
  38#include <linux/platform_device.h>
  39#include <linux/pm_qos.h>
  40#include <linux/sizes.h>
  41
  42#include <linux/spi/spi.h>
  43#include <linux/spi/spi-mem.h>
  44
  45/*
   46 * The driver only uses a single LUT entry, which is updated on
  47 * each call of exec_op(). Index 0 is preset at boot with a basic
  48 * read operation, so let's use the last entry (15).
  49 */
  50#define	SEQID_LUT			15
  51
  52/* Registers used by the driver */
  53#define QUADSPI_MCR			0x00
  54#define QUADSPI_MCR_RESERVED_MASK	GENMASK(19, 16)
  55#define QUADSPI_MCR_MDIS_MASK		BIT(14)
  56#define QUADSPI_MCR_CLR_TXF_MASK	BIT(11)
  57#define QUADSPI_MCR_CLR_RXF_MASK	BIT(10)
  58#define QUADSPI_MCR_DDR_EN_MASK		BIT(7)
  59#define QUADSPI_MCR_END_CFG_MASK	GENMASK(3, 2)
  60#define QUADSPI_MCR_SWRSTHD_MASK	BIT(1)
  61#define QUADSPI_MCR_SWRSTSD_MASK	BIT(0)
  62
  63#define QUADSPI_IPCR			0x08
  64#define QUADSPI_IPCR_SEQID(x)		((x) << 24)
  65
  66#define QUADSPI_FLSHCR			0x0c
  67#define QUADSPI_FLSHCR_TCSS_MASK	GENMASK(3, 0)
  68#define QUADSPI_FLSHCR_TCSH_MASK	GENMASK(11, 8)
  69#define QUADSPI_FLSHCR_TDH_MASK		GENMASK(17, 16)
  70
  71#define QUADSPI_BUF0CR                  0x10
  72#define QUADSPI_BUF1CR                  0x14
  73#define QUADSPI_BUF2CR                  0x18
  74#define QUADSPI_BUFXCR_INVALID_MSTRID   0xe
  75
  76#define QUADSPI_BUF3CR			0x1c
  77#define QUADSPI_BUF3CR_ALLMST_MASK	BIT(31)
  78#define QUADSPI_BUF3CR_ADATSZ(x)	((x) << 8)
  79#define QUADSPI_BUF3CR_ADATSZ_MASK	GENMASK(15, 8)
  80
  81#define QUADSPI_BFGENCR			0x20
  82#define QUADSPI_BFGENCR_SEQID(x)	((x) << 12)
  83
  84#define QUADSPI_BUF0IND			0x30
  85#define QUADSPI_BUF1IND			0x34
  86#define QUADSPI_BUF2IND			0x38
  87#define QUADSPI_SFAR			0x100
  88
  89#define QUADSPI_SMPR			0x108
  90#define QUADSPI_SMPR_DDRSMP_MASK	GENMASK(18, 16)
  91#define QUADSPI_SMPR_FSDLY_MASK		BIT(6)
  92#define QUADSPI_SMPR_FSPHS_MASK		BIT(5)
  93#define QUADSPI_SMPR_HSENA_MASK		BIT(0)
  94
  95#define QUADSPI_RBCT			0x110
  96#define QUADSPI_RBCT_WMRK_MASK		GENMASK(4, 0)
  97#define QUADSPI_RBCT_RXBRD_USEIPS	BIT(8)
  98
  99#define QUADSPI_TBDR			0x154
 100
 101#define QUADSPI_SR			0x15c
 102#define QUADSPI_SR_IP_ACC_MASK		BIT(1)
 103#define QUADSPI_SR_AHB_ACC_MASK		BIT(2)
 104
 105#define QUADSPI_FR			0x160
 106#define QUADSPI_FR_TFF_MASK		BIT(0)
 107
 108#define QUADSPI_RSER			0x164
 109#define QUADSPI_RSER_TFIE		BIT(0)
 110
 111#define QUADSPI_SPTRCLR			0x16c
 112#define QUADSPI_SPTRCLR_IPPTRC		BIT(8)
 113#define QUADSPI_SPTRCLR_BFPTRC		BIT(0)
 114
 115#define QUADSPI_SFA1AD			0x180
 116#define QUADSPI_SFA2AD			0x184
 117#define QUADSPI_SFB1AD			0x188
 118#define QUADSPI_SFB2AD			0x18c
 119#define QUADSPI_RBDR(x)			(0x200 + ((x) * 4))
 120
 121#define QUADSPI_LUTKEY			0x300
 122#define QUADSPI_LUTKEY_VALUE		0x5AF05AF0
 123
 124#define QUADSPI_LCKCR			0x304
 125#define QUADSPI_LCKER_LOCK		BIT(0)
 126#define QUADSPI_LCKER_UNLOCK		BIT(1)
 127
 128#define QUADSPI_LUT_BASE		0x310
 129#define QUADSPI_LUT_OFFSET		(SEQID_LUT * 4 * 4)
 130#define QUADSPI_LUT_REG(idx) \
 131	(QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)
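/*
 * Worked example: with SEQID_LUT = 15, QUADSPI_LUT_OFFSET is
 * 15 * 4 * 4 = 0xf0, so the four words of the sequence used by this
 * driver sit at QUADSPI_LUT_REG(0..3) = 0x400, 0x404, 0x408 and 0x40c.
 */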
 132
 133/* Instruction set for the LUT register */
 134#define LUT_STOP		0
 135#define LUT_CMD			1
 136#define LUT_ADDR		2
 137#define LUT_DUMMY		3
 138#define LUT_MODE		4
 139#define LUT_MODE2		5
 140#define LUT_MODE4		6
 141#define LUT_FSL_READ		7
 142#define LUT_FSL_WRITE		8
 143#define LUT_JMP_ON_CS		9
 144#define LUT_ADDR_DDR		10
 145#define LUT_MODE_DDR		11
 146#define LUT_MODE2_DDR		12
 147#define LUT_MODE4_DDR		13
 148#define LUT_FSL_READ_DDR	14
 149#define LUT_FSL_WRITE_DDR	15
 150#define LUT_DATA_LEARN		16
 151
 152/*
 153 * The PAD definitions for LUT register.
 154 *
 155 * The pad stands for the number of IO lines [0:3].
 156 * For example, the quad read needs four IO lines,
 157 * so you should use LUT_PAD(4).
 158 */
 159#define LUT_PAD(x) (fls(x) - 1)
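/*
 * For example, LUT_PAD(1) = 0 (single line), LUT_PAD(2) = 1 (dual) and
 * LUT_PAD(4) = 2 (quad), since fls(x) - 1 equals log2(x) for powers of two.
 */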
 160
 161/*
 162 * Macro for constructing the LUT entries with the following
 163 * register layout:
 164 *
 165 *  ---------------------------------------------------
 166 *  | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
 167 *  ---------------------------------------------------
 168 */
 169#define LUT_DEF(idx, ins, pad, opr)					\
 170	((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))
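/*
 * Illustrative expansion (opcode value assumed): a single-line command
 * entry such as LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x9f) evaluates to
 * (1 << 10) | (0 << 8) | 0x9f = 0x049f in the low half-word, because the
 * index is even; an odd index shifts the same pattern left by 16 bits.
 */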
 171
 172/* Controller needs driver to swap endianness */
 173#define QUADSPI_QUIRK_SWAP_ENDIAN	BIT(0)
 174
 175/* Controller needs 4x internal clock */
 176#define QUADSPI_QUIRK_4X_INT_CLK	BIT(1)
 177
 178/*
  179 * TKT253890: the controller needs the driver to fill the txfifo with
  180 * at least 16 bytes to trigger a data transfer, even though the extra
 181 * data won't be transferred.
 182 */
 183#define QUADSPI_QUIRK_TKT253890		BIT(2)
 184
 185/* TKT245618, the controller cannot wake up from wait mode */
 186#define QUADSPI_QUIRK_TKT245618		BIT(3)
 187
 188/*
 189 * Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
 190 * internally. No need to add it when setting SFXXAD and SFAR registers
 191 */
 192#define QUADSPI_QUIRK_BASE_INTERNAL	BIT(4)
 193
 194/*
 195 * Controller uses TDH bits in register QUADSPI_FLSHCR.
 196 * They need to be set in accordance with the DDR/SDR mode.
 197 */
 198#define QUADSPI_QUIRK_USE_TDH_SETTING	BIT(5)
 199
 200struct fsl_qspi_devtype_data {
 201	unsigned int rxfifo;
 202	unsigned int txfifo;
 203	int invalid_mstrid;
 204	unsigned int ahb_buf_size;
 205	unsigned int quirks;
 206	bool little_endian;
 207};
 208
 209static const struct fsl_qspi_devtype_data vybrid_data = {
 210	.rxfifo = SZ_128,
 211	.txfifo = SZ_64,
 212	.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
 213	.ahb_buf_size = SZ_1K,
 214	.quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
 215	.little_endian = true,
 216};
 217
 218static const struct fsl_qspi_devtype_data imx6sx_data = {
 219	.rxfifo = SZ_128,
 220	.txfifo = SZ_512,
 221	.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
 222	.ahb_buf_size = SZ_1K,
 223	.quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
 224	.little_endian = true,
 225};
 226
 227static const struct fsl_qspi_devtype_data imx7d_data = {
 228	.rxfifo = SZ_128,
 229	.txfifo = SZ_512,
 230	.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
 231	.ahb_buf_size = SZ_1K,
 232	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
 233		  QUADSPI_QUIRK_USE_TDH_SETTING,
 234	.little_endian = true,
 235};
 236
 237static const struct fsl_qspi_devtype_data imx6ul_data = {
 238	.rxfifo = SZ_128,
 239	.txfifo = SZ_512,
 240	.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
 241	.ahb_buf_size = SZ_1K,
 242	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
 243		  QUADSPI_QUIRK_USE_TDH_SETTING,
 244	.little_endian = true,
 245};
 246
 247static const struct fsl_qspi_devtype_data ls1021a_data = {
 248	.rxfifo = SZ_128,
 249	.txfifo = SZ_64,
 250	.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
 251	.ahb_buf_size = SZ_1K,
 252	.quirks = 0,
 253	.little_endian = false,
 254};
 255
 256static const struct fsl_qspi_devtype_data ls2080a_data = {
 257	.rxfifo = SZ_128,
 258	.txfifo = SZ_64,
 259	.ahb_buf_size = SZ_1K,
 260	.invalid_mstrid = 0x0,
 261	.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
 262	.little_endian = true,
 263};
 264
 265struct fsl_qspi {
 266	void __iomem *iobase;
 267	void __iomem *ahb_addr;
 268	u32 memmap_phy;
 269	struct clk *clk, *clk_en;
 270	struct device *dev;
 271	struct completion c;
 272	const struct fsl_qspi_devtype_data *devtype_data;
 273	struct mutex lock;
 274	struct pm_qos_request pm_qos_req;
 275	int selected;
 276};
 277
 278static inline int needs_swap_endian(struct fsl_qspi *q)
 279{
 280	return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
 281}
 282
 283static inline int needs_4x_clock(struct fsl_qspi *q)
 284{
 285	return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
 286}
 287
 288static inline int needs_fill_txfifo(struct fsl_qspi *q)
 289{
 290	return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
 291}
 292
 293static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
 294{
 295	return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
 296}
 297
 298static inline int needs_amba_base_offset(struct fsl_qspi *q)
 299{
 300	return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
 301}
 302
 303static inline int needs_tdh_setting(struct fsl_qspi *q)
 304{
 305	return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
 306}
 307
 308/*
 309 * An IC bug makes it necessary to rearrange the 32-bit data.
 310 * Later chips, such as IMX6SLX, have fixed this bug.
 311 */
 312static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
 313{
 314	return needs_swap_endian(q) ? __swab32(a) : a;
 315}
 316
 317/*
 318 * R/W functions for big- or little-endian registers:
 319 * The QSPI controller's endianness is independent of
  320 * the CPU core's endianness. Even when the CPU core is
  321 * little-endian, the QSPI controller may be either
  322 * big-endian or little-endian.
 323 */
 324static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
 325{
 326	if (q->devtype_data->little_endian)
 327		iowrite32(val, addr);
 328	else
 329		iowrite32be(val, addr);
 330}
 331
 332static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
 333{
 334	if (q->devtype_data->little_endian)
 335		return ioread32(addr);
 336
 337	return ioread32be(addr);
 338}
 339
 340static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
 341{
 342	struct fsl_qspi *q = dev_id;
 343	u32 reg;
 344
 345	/* clear interrupt */
 346	reg = qspi_readl(q, q->iobase + QUADSPI_FR);
 347	qspi_writel(q, reg, q->iobase + QUADSPI_FR);
 348
 349	if (reg & QUADSPI_FR_TFF_MASK)
 350		complete(&q->c);
 351
 352	dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg);
 353	return IRQ_HANDLED;
 354}
 355
 356static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
 357{
 358	switch (width) {
 359	case 1:
 360	case 2:
 361	case 4:
 362		return 0;
 363	}
 364
 365	return -ENOTSUPP;
 366}
 367
 368static bool fsl_qspi_supports_op(struct spi_mem *mem,
 369				 const struct spi_mem_op *op)
 370{
 371	struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
 372	int ret;
 373
 374	ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
 375
 376	if (op->addr.nbytes)
 377		ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);
 378
 379	if (op->dummy.nbytes)
 380		ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);
 381
 382	if (op->data.nbytes)
 383		ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);
 384
 385	if (ret)
 386		return false;
 387
 388	/*
  389	 * The number of instructions needed for the op needs
 390	 * to fit into a single LUT entry.
 391	 */
 392	if (op->addr.nbytes +
 393	   (op->dummy.nbytes ? 1:0) +
 394	   (op->data.nbytes ? 1:0) > 6)
 395		return false;
 396
 397	/* Max 64 dummy clock cycles supported */
 398	if (op->dummy.nbytes &&
 399	    (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
 400		return false;
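	/*
	 * For example (lengths assumed): 8 dummy bytes on a single line are
	 * 8 * 8 / 1 = 64 cycles and pass this check, while the same 8 bytes
	 * on a quad bus are only 16 cycles.
	 */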
 401
 402	/* Max data length, check controller limits and alignment */
 403	if (op->data.dir == SPI_MEM_DATA_IN &&
 404	    (op->data.nbytes > q->devtype_data->ahb_buf_size ||
 405	     (op->data.nbytes > q->devtype_data->rxfifo - 4 &&
 406	      !IS_ALIGNED(op->data.nbytes, 8))))
 407		return false;
 408
 409	if (op->data.dir == SPI_MEM_DATA_OUT &&
 410	    op->data.nbytes > q->devtype_data->txfifo)
 411		return false;
 412
 413	return spi_mem_default_supports_op(mem, op);
 414}
 415
 416static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
 417				 const struct spi_mem_op *op)
 418{
 419	void __iomem *base = q->iobase;
 420	u32 lutval[4] = {};
 421	int lutidx = 1, i;
 422
 423	lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
 424			     op->cmd.opcode);
 425
 426	/*
 427	 * For some unknown reason, using LUT_ADDR doesn't work in some
  428	 * cases (at least with one-byte-long addresses), so
  429	 * let's use LUT_MODE to write the address bytes one by one.
 430	 */
 431	for (i = 0; i < op->addr.nbytes; i++) {
 432		u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
 433
 434		lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
 435					      LUT_PAD(op->addr.buswidth),
 436					      addrbyte);
 437		lutidx++;
 438	}
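	/*
	 * E.g. (address assumed for illustration): a 3-byte address
	 * 0x123456 is emitted as three LUT_MODE entries carrying 0x12,
	 * 0x34 and 0x56, most significant byte first.
	 */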
 439
 440	if (op->dummy.nbytes) {
 441		lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
 442					      LUT_PAD(op->dummy.buswidth),
 443					      op->dummy.nbytes * 8 /
 444					      op->dummy.buswidth);
 445		lutidx++;
 446	}
 447
 448	if (op->data.nbytes) {
 449		lutval[lutidx / 2] |= LUT_DEF(lutidx,
 450					      op->data.dir == SPI_MEM_DATA_IN ?
 451					      LUT_FSL_READ : LUT_FSL_WRITE,
 452					      LUT_PAD(op->data.buswidth),
 453					      0);
 454		lutidx++;
 455	}
 456
 457	lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
 458
 459	/* unlock LUT */
 460	qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
 461	qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
 462
 463	/* fill LUT */
 464	for (i = 0; i < ARRAY_SIZE(lutval); i++)
 465		qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));
 466
 467	/* lock LUT */
 468	qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
 469	qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
 470}
 471
 472static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
 473{
 474	int ret;
 475
 476	ret = clk_prepare_enable(q->clk_en);
 477	if (ret)
 478		return ret;
 479
 480	ret = clk_prepare_enable(q->clk);
 481	if (ret) {
 482		clk_disable_unprepare(q->clk_en);
 483		return ret;
 484	}
 485
 486	if (needs_wakeup_wait_mode(q))
 487		cpu_latency_qos_add_request(&q->pm_qos_req, 0);
 488
 489	return 0;
 490}
 491
 492static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
 493{
 494	if (needs_wakeup_wait_mode(q))
 495		cpu_latency_qos_remove_request(&q->pm_qos_req);
 496
 497	clk_disable_unprepare(q->clk);
 498	clk_disable_unprepare(q->clk_en);
 499}
 500
 501/*
 502 * If we have changed the content of the flash by writing or erasing, or if we
 503 * read from flash with a different offset into the page buffer, we need to
 504 * invalidate the AHB buffer. If we do not do so, we may read out the wrong
  505 * data. The spec tells us to reset the AHB domain and Serial Flash
  506 * domain at the same time.
 507 */
 508static void fsl_qspi_invalidate(struct fsl_qspi *q)
 509{
 510	u32 reg;
 511
 512	reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
 513	reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
 514	qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
 515
 516	/*
  517	 * The minimum delay is 1 AHB + 2 SFCK clocks.
  518	 * A delay of 1 us is enough.
 519	 */
 520	udelay(1);
 521
 522	reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
 523	qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
 524}
 525
 526static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
 527{
 528	unsigned long rate = spi->max_speed_hz;
 529	int ret;
 530
 531	if (q->selected == spi->chip_select)
 532		return;
 533
 534	if (needs_4x_clock(q))
 535		rate *= 4;
 536
 537	fsl_qspi_clk_disable_unprep(q);
 538
 539	ret = clk_set_rate(q->clk, rate);
 540	if (ret)
 541		return;
 542
 543	ret = fsl_qspi_clk_prep_enable(q);
 544	if (ret)
 545		return;
 546
 547	q->selected = spi->chip_select;
 548
 549	fsl_qspi_invalidate(q);
 550}
 551
 552static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
 553{
 554	memcpy_fromio(op->data.buf.in,
 555		      q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size,
 556		      op->data.nbytes);
 557}
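/*
 * Each chip select owns an ahb_buf_size-sized window in the mapped memory:
 * e.g. with a 1 KiB buffer, chip select 2 is read starting at
 * ahb_addr + 0x800.
 */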
 558
 559static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
 560				 const struct spi_mem_op *op)
 561{
 562	void __iomem *base = q->iobase;
 563	int i;
 564	u32 val;
 565
 566	for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
 567		memcpy(&val, op->data.buf.out + i, 4);
 568		val = fsl_qspi_endian_xchg(q, val);
 569		qspi_writel(q, val, base + QUADSPI_TBDR);
 570	}
 571
 572	if (i < op->data.nbytes) {
 573		memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
 574		val = fsl_qspi_endian_xchg(q, val);
 575		qspi_writel(q, val, base + QUADSPI_TBDR);
 576	}
 577
 578	if (needs_fill_txfifo(q)) {
 579		for (i = op->data.nbytes; i < 16; i += 4)
 580			qspi_writel(q, 0, base + QUADSPI_TBDR);
 581	}
 582}
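/*
 * Example of the TKT253890 quirk (length assumed): a 5-byte write queues
 * one full data word, one partial word and then three zero words
 * (i = 5, 9, 13), so that at least 16 bytes sit in the TX FIFO.
 */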
 583
 584static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
 585			  const struct spi_mem_op *op)
 586{
 587	void __iomem *base = q->iobase;
 588	int i;
 589	u8 *buf = op->data.buf.in;
 590	u32 val;
 591
 592	for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
 593		val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
 594		val = fsl_qspi_endian_xchg(q, val);
 595		memcpy(buf + i, &val, 4);
 596	}
 597
 598	if (i < op->data.nbytes) {
 599		val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
 600		val = fsl_qspi_endian_xchg(q, val);
 601		memcpy(buf + i, &val, op->data.nbytes - i);
 602	}
 603}
 604
 605static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
 606{
 607	void __iomem *base = q->iobase;
 608	int err = 0;
 609
 610	init_completion(&q->c);
 611
 612	/*
 613	 * Always start the sequence at the same index since we update
  614	 * the LUT at each exec_op() call. Also specify the DATA
  615	 * length, since it has not been specified in the LUT.
 616	 */
 617	qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
 618		    base + QUADSPI_IPCR);
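	/*
	 * E.g. (length assumed): a 256-byte transfer with SEQID_LUT = 15
	 * writes 0x0f000100 to IPCR.
	 */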
 619
 620	/* Wait for the interrupt. */
 621	if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)))
 622		err = -ETIMEDOUT;
 623
 624	if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
 625		fsl_qspi_read_rxfifo(q, op);
 626
 627	return err;
 628}
 629
 630static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
 631				    u32 mask, u32 delay_us, u32 timeout_us)
 632{
 633	u32 reg;
 634
 635	if (!q->devtype_data->little_endian)
 636		mask = (u32)cpu_to_be32(mask);
 637
 638	return readl_poll_timeout(base, reg, !(reg & mask), delay_us,
 639				  timeout_us);
 640}
 641
 642static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 643{
 644	struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
 645	void __iomem *base = q->iobase;
 646	u32 addr_offset = 0;
 647	int err = 0;
 648	int invalid_mstrid = q->devtype_data->invalid_mstrid;
 649
 650	mutex_lock(&q->lock);
 651
  652	/* wait for the controller to be ready */
 653	fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
 654				 QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
 655
 656	fsl_qspi_select_mem(q, mem->spi);
 657
 658	if (needs_amba_base_offset(q))
 659		addr_offset = q->memmap_phy;
 660
 661	qspi_writel(q,
 662		    q->selected * q->devtype_data->ahb_buf_size + addr_offset,
 663		    base + QUADSPI_SFAR);
 664
 665	qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
 666		    QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
 667		    base + QUADSPI_MCR);
 668
 669	qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
 670		    base + QUADSPI_SPTRCLR);
 671
 672	qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF0CR);
 673	qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF1CR);
 674	qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF2CR);
 675
 676	fsl_qspi_prepare_lut(q, op);
 677
 678	/*
 679	 * If we have large chunks of data, we read them through the AHB bus
 680	 * by accessing the mapped memory. In all other cases we use
 681	 * IP commands to access the flash.
 682	 */
 683	if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
 684	    op->data.dir == SPI_MEM_DATA_IN) {
 685		fsl_qspi_read_ahb(q, op);
 686	} else {
 687		qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
 688			    QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);
 689
 690		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
 691			fsl_qspi_fill_txfifo(q, op);
 692
 693		err = fsl_qspi_do_op(q, op);
 694	}
 695
 696	/* Invalidate the data in the AHB buffer. */
 697	fsl_qspi_invalidate(q);
 698
 699	mutex_unlock(&q->lock);
 700
 701	return err;
 702}
 703
 704static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
 705{
 706	struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
 707
 708	if (op->data.dir == SPI_MEM_DATA_OUT) {
 709		if (op->data.nbytes > q->devtype_data->txfifo)
 710			op->data.nbytes = q->devtype_data->txfifo;
 711	} else {
 712		if (op->data.nbytes > q->devtype_data->ahb_buf_size)
 713			op->data.nbytes = q->devtype_data->ahb_buf_size;
 714		else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
 715			op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
 716	}
 717
 718	return 0;
 719}
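/*
 * E.g. (sizes assumed): with a 128-byte RX FIFO and a 1 KiB AHB buffer, a
 * 2000-byte read is clamped to 1024 bytes, and a 129-byte read is rounded
 * down to 128 bytes so the AHB path sees an 8-byte-aligned length.
 */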
 720
 721static int fsl_qspi_default_setup(struct fsl_qspi *q)
 722{
 723	void __iomem *base = q->iobase;
 724	u32 reg, addr_offset = 0;
 725	int ret;
 726
  727	/* disable and unprepare the clock to avoid glitches reaching the controller */
 728	fsl_qspi_clk_disable_unprep(q);
 729
  730	/* Set the default frequency; we will change it later if necessary. */
 731	ret = clk_set_rate(q->clk, 66000000);
 732	if (ret)
 733		return ret;
 734
 735	ret = fsl_qspi_clk_prep_enable(q);
 736	if (ret)
 737		return ret;
 738
 739	/* Reset the module */
 740	qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
 741		    base + QUADSPI_MCR);
 742	udelay(1);
 743
 744	/* Disable the module */
 745	qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
 746		    base + QUADSPI_MCR);
 747
 748	/*
 749	 * Previous boot stages (BootROM, bootloader) might have used DDR
 750	 * mode and did not clear the TDH bits. As we currently use SDR mode
 751	 * only, clear the TDH bits if necessary.
 752	 */
 753	if (needs_tdh_setting(q))
 754		qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
 755			    ~QUADSPI_FLSHCR_TDH_MASK,
 756			    base + QUADSPI_FLSHCR);
 757
 758	reg = qspi_readl(q, base + QUADSPI_SMPR);
 759	qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
 760			| QUADSPI_SMPR_FSPHS_MASK
 761			| QUADSPI_SMPR_HSENA_MASK
 762			| QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
 763
  764	/* We only use buffer3 for AHB reads */
 765	qspi_writel(q, 0, base + QUADSPI_BUF0IND);
 766	qspi_writel(q, 0, base + QUADSPI_BUF1IND);
 767	qspi_writel(q, 0, base + QUADSPI_BUF2IND);
 768
 769	qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
 770		    q->iobase + QUADSPI_BFGENCR);
 771	qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
 772	qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
 773		    QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
 774		    base + QUADSPI_BUF3CR);
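	/*
	 * The AHB data transfer size is set to ahb_buf_size / 8 here,
	 * i.e. a 1 KiB buffer yields 1024 / 8 = 128.
	 */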
 775
 776	if (needs_amba_base_offset(q))
 777		addr_offset = q->memmap_phy;
 778
 779	/*
 780	 * In HW there can be a maximum of four chips on two buses with
 781	 * two chip selects on each bus. We use four chip selects in SW
 782	 * to differentiate between the four chips.
 783	 * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD,
 784	 * SFB2AD accordingly.
 785	 */
 786	qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset,
 787		    base + QUADSPI_SFA1AD);
 788	qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset,
 789		    base + QUADSPI_SFA2AD);
 790	qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset,
 791		    base + QUADSPI_SFB1AD);
 792	qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset,
 793		    base + QUADSPI_SFB2AD);
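	/*
	 * E.g. with a 1 KiB buffer per chip, the top-of-range registers are
	 * programmed to addr_offset + 0x400, 0x800, 0xc00 and 0x1000 for
	 * SFA1AD, SFA2AD, SFB1AD and SFB2AD respectively.
	 */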
 794
 795	q->selected = -1;
 796
 797	/* Enable the module */
 798	qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
 799		    base + QUADSPI_MCR);
 800
 801	/* clear all interrupt status */
 802	qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
 803
 804	/* enable the interrupt */
 805	qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
 806
 807	return 0;
 808}
 809
 810static const char *fsl_qspi_get_name(struct spi_mem *mem)
 811{
 812	struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
 813	struct device *dev = &mem->spi->dev;
 814	const char *name;
 815
 816	/*
 817	 * In order to keep mtdparts compatible with the old MTD driver at
 818	 * mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the
 819	 * platform_device of the controller.
 820	 */
 821	if (of_get_available_child_count(q->dev->of_node) == 1)
 822		return dev_name(q->dev);
 823
 824	name = devm_kasprintf(dev, GFP_KERNEL,
 825			      "%s-%d", dev_name(q->dev),
 826			      mem->spi->chip_select);
 827
 828	if (!name) {
 829		dev_err(dev, "failed to get memory for custom flash name\n");
 830		return ERR_PTR(-ENOMEM);
 831	}
 832
 833	return name;
 834}
 835
 836static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
 837	.adjust_op_size = fsl_qspi_adjust_op_size,
 838	.supports_op = fsl_qspi_supports_op,
 839	.exec_op = fsl_qspi_exec_op,
 840	.get_name = fsl_qspi_get_name,
 841};
 842
 843static int fsl_qspi_probe(struct platform_device *pdev)
 844{
 845	struct spi_controller *ctlr;
 846	struct device *dev = &pdev->dev;
 847	struct device_node *np = dev->of_node;
 848	struct resource *res;
 849	struct fsl_qspi *q;
 850	int ret;
 851
 852	ctlr = spi_alloc_master(&pdev->dev, sizeof(*q));
 853	if (!ctlr)
 854		return -ENOMEM;
 855
 856	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
 857			  SPI_TX_DUAL | SPI_TX_QUAD;
 858
 859	q = spi_controller_get_devdata(ctlr);
 860	q->dev = dev;
 861	q->devtype_data = of_device_get_match_data(dev);
 862	if (!q->devtype_data) {
 863		ret = -ENODEV;
 864		goto err_put_ctrl;
 865	}
 866
 867	platform_set_drvdata(pdev, q);
 868
 869	/* find the resources */
 870	q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI");
 871	if (IS_ERR(q->iobase)) {
 872		ret = PTR_ERR(q->iobase);
 873		goto err_put_ctrl;
 874	}
 875
 876	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 877					"QuadSPI-memory");
 878	if (!res) {
 879		ret = -EINVAL;
 880		goto err_put_ctrl;
 881	}
 882	q->memmap_phy = res->start;
 883	/* Since there are 4 cs, map size required is 4 times ahb_buf_size */
 884	q->ahb_addr = devm_ioremap(dev, q->memmap_phy,
 885				   (q->devtype_data->ahb_buf_size * 4));
 886	if (!q->ahb_addr) {
 887		ret = -ENOMEM;
 888		goto err_put_ctrl;
 889	}
 890
 891	/* find the clocks */
 892	q->clk_en = devm_clk_get(dev, "qspi_en");
 893	if (IS_ERR(q->clk_en)) {
 894		ret = PTR_ERR(q->clk_en);
 895		goto err_put_ctrl;
 896	}
 897
 898	q->clk = devm_clk_get(dev, "qspi");
 899	if (IS_ERR(q->clk)) {
 900		ret = PTR_ERR(q->clk);
 901		goto err_put_ctrl;
 902	}
 903
 904	ret = fsl_qspi_clk_prep_enable(q);
 905	if (ret) {
 906		dev_err(dev, "can not enable the clock\n");
 907		goto err_put_ctrl;
 908	}
 909
 910	/* find the irq */
 911	ret = platform_get_irq(pdev, 0);
 912	if (ret < 0)
 913		goto err_disable_clk;
 914
 915	ret = devm_request_irq(dev, ret,
 916			fsl_qspi_irq_handler, 0, pdev->name, q);
 917	if (ret) {
 918		dev_err(dev, "failed to request irq: %d\n", ret);
 919		goto err_disable_clk;
 920	}
 921
 922	mutex_init(&q->lock);
 923
 924	ctlr->bus_num = -1;
 925	ctlr->num_chipselect = 4;
 926	ctlr->mem_ops = &fsl_qspi_mem_ops;
 927
 928	fsl_qspi_default_setup(q);
 929
 930	ctlr->dev.of_node = np;
 931
 932	ret = devm_spi_register_controller(dev, ctlr);
 933	if (ret)
 934		goto err_destroy_mutex;
 935
 936	return 0;
 937
 938err_destroy_mutex:
 939	mutex_destroy(&q->lock);
 940
 941err_disable_clk:
 942	fsl_qspi_clk_disable_unprep(q);
 943
 944err_put_ctrl:
 945	spi_controller_put(ctlr);
 946
 947	dev_err(dev, "Freescale QuadSPI probe failed\n");
 948	return ret;
 949}
 950
 951static int fsl_qspi_remove(struct platform_device *pdev)
 952{
 953	struct fsl_qspi *q = platform_get_drvdata(pdev);
 954
 955	/* disable the hardware */
 956	qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
 957	qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
 958
 959	fsl_qspi_clk_disable_unprep(q);
 960
 961	mutex_destroy(&q->lock);
 962
 963	return 0;
 964}
 965
 966static int fsl_qspi_suspend(struct device *dev)
 967{
 968	return 0;
 969}
 970
 971static int fsl_qspi_resume(struct device *dev)
 972{
 973	struct fsl_qspi *q = dev_get_drvdata(dev);
 974
 975	fsl_qspi_default_setup(q);
 976
 977	return 0;
 978}
 979
 980static const struct of_device_id fsl_qspi_dt_ids[] = {
 981	{ .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
 982	{ .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
 983	{ .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
 984	{ .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
 985	{ .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, },
 986	{ .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
 987	{ /* sentinel */ }
 988};
 989MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
 990
 991static const struct dev_pm_ops fsl_qspi_pm_ops = {
 992	.suspend	= fsl_qspi_suspend,
 993	.resume		= fsl_qspi_resume,
 994};
 995
 996static struct platform_driver fsl_qspi_driver = {
 997	.driver = {
 998		.name	= "fsl-quadspi",
 999		.of_match_table = fsl_qspi_dt_ids,
1000		.pm =   &fsl_qspi_pm_ops,
1001	},
1002	.probe          = fsl_qspi_probe,
1003	.remove		= fsl_qspi_remove,
1004};
1005module_platform_driver(fsl_qspi_driver);
1006
1007MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
1008MODULE_AUTHOR("Freescale Semiconductor Inc.");
1009MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
1010MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
1011MODULE_AUTHOR("Yogesh Gaur <yogeshnarayan.gaur@nxp.com>");
1012MODULE_AUTHOR("Suresh Gupta <suresh.gupta@nxp.com>");
1013MODULE_LICENSE("GPL v2");