v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2//
   3// Copyright (C) 2020 NVIDIA CORPORATION.
   4
   5#include <linux/clk.h>
   6#include <linux/completion.h>
   7#include <linux/delay.h>
   8#include <linux/dmaengine.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/dmapool.h>
  11#include <linux/err.h>
  12#include <linux/interrupt.h>
  13#include <linux/io.h>
  14#include <linux/iopoll.h>
  15#include <linux/kernel.h>
  16#include <linux/kthread.h>
  17#include <linux/module.h>
  18#include <linux/platform_device.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/of.h>
  21#include <linux/of_device.h>
  22#include <linux/reset.h>
  23#include <linux/spi/spi.h>
  24#include <linux/acpi.h>
  25#include <linux/property.h>
  26
  27#define QSPI_COMMAND1				0x000
  28#define QSPI_BIT_LENGTH(x)			(((x) & 0x1f) << 0)
  29#define QSPI_PACKED				BIT(5)
  30#define QSPI_INTERFACE_WIDTH_MASK		(0x03 << 7)
  31#define QSPI_INTERFACE_WIDTH(x)			(((x) & 0x03) << 7)
  32#define QSPI_INTERFACE_WIDTH_SINGLE		QSPI_INTERFACE_WIDTH(0)
  33#define QSPI_INTERFACE_WIDTH_DUAL		QSPI_INTERFACE_WIDTH(1)
  34#define QSPI_INTERFACE_WIDTH_QUAD		QSPI_INTERFACE_WIDTH(2)
  35#define QSPI_SDR_DDR_SEL			BIT(9)
  36#define QSPI_TX_EN				BIT(11)
  37#define QSPI_RX_EN				BIT(12)
  38#define QSPI_CS_SW_VAL				BIT(20)
  39#define QSPI_CS_SW_HW				BIT(21)
  40
  41#define QSPI_CS_POL_INACTIVE(n)			(1 << (22 + (n)))
  42#define QSPI_CS_POL_INACTIVE_MASK		(0xF << 22)
  43#define QSPI_CS_SEL_0				(0 << 26)
  44#define QSPI_CS_SEL_1				(1 << 26)
  45#define QSPI_CS_SEL_2				(2 << 26)
  46#define QSPI_CS_SEL_3				(3 << 26)
  47#define QSPI_CS_SEL_MASK			(3 << 26)
  48#define QSPI_CS_SEL(x)				(((x) & 0x3) << 26)
  49
  50#define QSPI_CONTROL_MODE_0			(0 << 28)
  51#define QSPI_CONTROL_MODE_3			(3 << 28)
  52#define QSPI_CONTROL_MODE_MASK			(3 << 28)
  53#define QSPI_M_S				BIT(30)
  54#define QSPI_PIO				BIT(31)
  55
  56#define QSPI_COMMAND2				0x004
  57#define QSPI_TX_TAP_DELAY(x)			(((x) & 0x3f) << 10)
  58#define QSPI_RX_TAP_DELAY(x)			(((x) & 0xff) << 0)
  59
  60#define QSPI_CS_TIMING1				0x008
  61#define QSPI_SETUP_HOLD(setup, hold)		(((setup) << 4) | (hold))
  62
  63#define QSPI_CS_TIMING2				0x00c
  64#define CYCLES_BETWEEN_PACKETS_0(x)		(((x) & 0x1f) << 0)
  65#define CS_ACTIVE_BETWEEN_PACKETS_0		BIT(5)
  66
  67#define QSPI_TRANS_STATUS			0x010
  68#define QSPI_BLK_CNT(val)			(((val) >> 0) & 0xffff)
  69#define QSPI_RDY				BIT(30)
  70
  71#define QSPI_FIFO_STATUS			0x014
  72#define QSPI_RX_FIFO_EMPTY			BIT(0)
  73#define QSPI_RX_FIFO_FULL			BIT(1)
  74#define QSPI_TX_FIFO_EMPTY			BIT(2)
  75#define QSPI_TX_FIFO_FULL			BIT(3)
  76#define QSPI_RX_FIFO_UNF			BIT(4)
  77#define QSPI_RX_FIFO_OVF			BIT(5)
  78#define QSPI_TX_FIFO_UNF			BIT(6)
  79#define QSPI_TX_FIFO_OVF			BIT(7)
  80#define QSPI_ERR				BIT(8)
  81#define QSPI_TX_FIFO_FLUSH			BIT(14)
  82#define QSPI_RX_FIFO_FLUSH			BIT(15)
  83#define QSPI_TX_FIFO_EMPTY_COUNT(val)		(((val) >> 16) & 0x7f)
  84#define QSPI_RX_FIFO_FULL_COUNT(val)		(((val) >> 23) & 0x7f)
  85
  86#define QSPI_FIFO_ERROR				(QSPI_RX_FIFO_UNF | \
  87						 QSPI_RX_FIFO_OVF | \
  88						 QSPI_TX_FIFO_UNF | \
  89						 QSPI_TX_FIFO_OVF)
  90#define QSPI_FIFO_EMPTY				(QSPI_RX_FIFO_EMPTY | \
  91						 QSPI_TX_FIFO_EMPTY)
  92
  93#define QSPI_TX_DATA				0x018
  94#define QSPI_RX_DATA				0x01c
  95
  96#define QSPI_DMA_CTL				0x020
  97#define QSPI_TX_TRIG(n)				(((n) & 0x3) << 15)
  98#define QSPI_TX_TRIG_1				QSPI_TX_TRIG(0)
  99#define QSPI_TX_TRIG_4				QSPI_TX_TRIG(1)
 100#define QSPI_TX_TRIG_8				QSPI_TX_TRIG(2)
 101#define QSPI_TX_TRIG_16				QSPI_TX_TRIG(3)
 102
 103#define QSPI_RX_TRIG(n)				(((n) & 0x3) << 19)
 104#define QSPI_RX_TRIG_1				QSPI_RX_TRIG(0)
 105#define QSPI_RX_TRIG_4				QSPI_RX_TRIG(1)
 106#define QSPI_RX_TRIG_8				QSPI_RX_TRIG(2)
 107#define QSPI_RX_TRIG_16				QSPI_RX_TRIG(3)
 108
 109#define QSPI_DMA_EN				BIT(31)
 110
 111#define QSPI_DMA_BLK				0x024
 112#define QSPI_DMA_BLK_SET(x)			(((x) & 0xffff) << 0)
 113
 114#define QSPI_TX_FIFO				0x108
 115#define QSPI_RX_FIFO				0x188
 116
 117#define QSPI_FIFO_DEPTH				64
 118
 119#define QSPI_INTR_MASK				0x18c
 120#define QSPI_INTR_RX_FIFO_UNF_MASK		BIT(25)
 121#define QSPI_INTR_RX_FIFO_OVF_MASK		BIT(26)
 122#define QSPI_INTR_TX_FIFO_UNF_MASK		BIT(27)
 123#define QSPI_INTR_TX_FIFO_OVF_MASK		BIT(28)
 124#define QSPI_INTR_RDY_MASK			BIT(29)
 125#define QSPI_INTR_RX_TX_FIFO_ERR		(QSPI_INTR_RX_FIFO_UNF_MASK | \
 126						 QSPI_INTR_RX_FIFO_OVF_MASK | \
 127						 QSPI_INTR_TX_FIFO_UNF_MASK | \
 128						 QSPI_INTR_TX_FIFO_OVF_MASK)
 129
 130#define QSPI_MISC_REG                           0x194
 131#define QSPI_NUM_DUMMY_CYCLE(x)			(((x) & 0xff) << 0)
 132#define QSPI_DUMMY_CYCLES_MAX			0xff
 133
 134#define QSPI_CMB_SEQ_CMD			0x19c
  135#define QSPI_COMMAND_VALUE_SET(x)		(((x) & 0xFF) << 0)
 136
 137#define QSPI_CMB_SEQ_CMD_CFG			0x1a0
 138#define QSPI_COMMAND_X1_X2_X4(x)		(((x) & 0x3) << 13)
 139#define QSPI_COMMAND_X1_X2_X4_MASK		(0x03 << 13)
 140#define QSPI_COMMAND_SDR_DDR			BIT(12)
 141#define QSPI_COMMAND_SIZE_SET(x)		(((x) & 0xFF) << 0)
 142
  143#define QSPI_GLOBAL_CONFIG			0x1a4
 144#define QSPI_CMB_SEQ_EN				BIT(0)
 145
 146#define QSPI_CMB_SEQ_ADDR			0x1a8
  147#define QSPI_ADDRESS_VALUE_SET(x)		(((x) & 0xFFFF) << 0)
 148
 149#define QSPI_CMB_SEQ_ADDR_CFG			0x1ac
 150#define QSPI_ADDRESS_X1_X2_X4(x)		(((x) & 0x3) << 13)
 151#define QSPI_ADDRESS_X1_X2_X4_MASK		(0x03 << 13)
 152#define QSPI_ADDRESS_SDR_DDR			BIT(12)
 153#define QSPI_ADDRESS_SIZE_SET(x)		(((x) & 0xFF) << 0)
 154
 155#define DATA_DIR_TX				BIT(0)
 156#define DATA_DIR_RX				BIT(1)
 157
 158#define QSPI_DMA_TIMEOUT			(msecs_to_jiffies(1000))
 159#define DEFAULT_QSPI_DMA_BUF_LEN		(64 * 1024)
 160#define CMD_TRANSFER				0
 161#define ADDR_TRANSFER				1
 162#define DATA_TRANSFER				2
 163
 164struct tegra_qspi_soc_data {
 165	bool has_dma;
 166	bool cmb_xfer_capable;
 167	unsigned int cs_count;
 168};
 169
 170struct tegra_qspi_client_data {
 171	int tx_clk_tap_delay;
 172	int rx_clk_tap_delay;
 173};
 174
 175struct tegra_qspi {
 176	struct device				*dev;
 177	struct spi_master			*master;
 178	/* lock to protect data accessed by irq */
 179	spinlock_t				lock;
 180
 181	struct clk				*clk;
 182	void __iomem				*base;
 183	phys_addr_t				phys;
 184	unsigned int				irq;
 185
 186	u32					cur_speed;
 187	unsigned int				cur_pos;
 188	unsigned int				words_per_32bit;
 189	unsigned int				bytes_per_word;
 190	unsigned int				curr_dma_words;
 191	unsigned int				cur_direction;
 192
 193	unsigned int				cur_rx_pos;
 194	unsigned int				cur_tx_pos;
 195
 196	unsigned int				dma_buf_size;
 197	unsigned int				max_buf_size;
 198	bool					is_curr_dma_xfer;
 199
 200	struct completion			rx_dma_complete;
 201	struct completion			tx_dma_complete;
 202
 203	u32					tx_status;
 204	u32					rx_status;
 205	u32					status_reg;
 206	bool					is_packed;
 207	bool					use_dma;
 208
 209	u32					command1_reg;
 210	u32					dma_control_reg;
 211	u32					def_command1_reg;
 212	u32					def_command2_reg;
 213	u32					spi_cs_timing1;
 214	u32					spi_cs_timing2;
 215	u8					dummy_cycles;
 216
 217	struct completion			xfer_completion;
 218	struct spi_transfer			*curr_xfer;
 219
 220	struct dma_chan				*rx_dma_chan;
 221	u32					*rx_dma_buf;
 222	dma_addr_t				rx_dma_phys;
 223	struct dma_async_tx_descriptor		*rx_dma_desc;
 224
 225	struct dma_chan				*tx_dma_chan;
 226	u32					*tx_dma_buf;
 227	dma_addr_t				tx_dma_phys;
 228	struct dma_async_tx_descriptor		*tx_dma_desc;
 229	const struct tegra_qspi_soc_data	*soc_data;
 230};
 231
 232static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
 233{
 234	return readl(tqspi->base + offset);
 235}
 236
 237static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
 238{
 239	writel(value, tqspi->base + offset);
 240
 241	/* read back register to make sure that register writes completed */
 242	if (offset != QSPI_TX_FIFO)
 243		readl(tqspi->base + QSPI_COMMAND1);
 244}
 245
 246static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
 247{
 248	u32 value;
 249
 250	/* write 1 to clear status register */
 251	value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
 252	tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);
 253
 254	value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
 255	if (!(value & QSPI_INTR_RDY_MASK)) {
 256		value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
 257		tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
 258	}
 259
 260	/* clear fifo status error if any */
 261	value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 262	if (value & QSPI_ERR)
 263		tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
 264}
 265
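/*
 * Note: the helper below works out how the next chunk of a transfer maps
 * onto the controller: it picks packed vs unpacked mode, records how many
 * words will be moved in this round and returns the number of 32-bit FIFO
 * words needed, which the caller uses to choose between PIO and DMA.
 */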
 266static unsigned int
 267tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
 268{
 269	unsigned int max_word, max_len, total_fifo_words;
 270	unsigned int remain_len = t->len - tqspi->cur_pos;
 271	unsigned int bits_per_word = t->bits_per_word;
 272
 273	tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
 274
 275	/*
 276	 * Tegra QSPI controller supports packed or unpacked mode transfers.
  277	 * Packed mode is used for transfers of 8, 16 or 32 bits per word when
  278	 * the transfer is longer than 3 bytes; all other transfers use
  279	 * unpacked mode.
 280	 */
 281
 282	if ((bits_per_word == 8 || bits_per_word == 16 ||
 283	     bits_per_word == 32) && t->len > 3) {
 284		tqspi->is_packed = true;
 285		tqspi->words_per_32bit = 32 / bits_per_word;
 286	} else {
 287		tqspi->is_packed = false;
 288		tqspi->words_per_32bit = 1;
 289	}
 290
 291	if (tqspi->is_packed) {
 292		max_len = min(remain_len, tqspi->max_buf_size);
 293		tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
 294		total_fifo_words = (max_len + 3) / 4;
 295	} else {
 296		max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
 297		max_word = min(max_word, tqspi->max_buf_size / 4);
 298		tqspi->curr_dma_words = max_word;
 299		total_fifo_words = max_word;
 300	}
 301
 302	return total_fifo_words;
 303}
 304
 305static unsigned int
 306tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
 307{
 308	unsigned int written_words, fifo_words_left, count;
 309	unsigned int len, tx_empty_count, max_n_32bit, i;
 310	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
 311	u32 fifo_status;
 312
 313	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 314	tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);
 315
 316	if (tqspi->is_packed) {
 317		fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
 318		written_words = min(fifo_words_left, tqspi->curr_dma_words);
 319		len = written_words * tqspi->bytes_per_word;
 320		max_n_32bit = DIV_ROUND_UP(len, 4);
 321		for (count = 0; count < max_n_32bit; count++) {
 322			u32 x = 0;
 323
 324			for (i = 0; (i < 4) && len; i++, len--)
 325				x |= (u32)(*tx_buf++) << (i * 8);
 326			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
 327		}
 328
 329		tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
 330	} else {
 331		unsigned int write_bytes;
 332		u8 bytes_per_word = tqspi->bytes_per_word;
 333
 334		max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
 335		written_words = max_n_32bit;
 336		len = written_words * tqspi->bytes_per_word;
 337		if (len > t->len - tqspi->cur_pos)
 338			len = t->len - tqspi->cur_pos;
 339		write_bytes = len;
 340		for (count = 0; count < max_n_32bit; count++) {
 341			u32 x = 0;
 342
 343			for (i = 0; len && (i < bytes_per_word); i++, len--)
 344				x |= (u32)(*tx_buf++) << (i * 8);
 345			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
 346		}
 347
 348		tqspi->cur_tx_pos += write_bytes;
 349	}
 350
 351	return written_words;
 352}
 353
 354static unsigned int
 355tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
 356{
 357	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
 358	unsigned int len, rx_full_count, count, i;
 359	unsigned int read_words = 0;
 360	u32 fifo_status, x;
 361
 362	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 363	rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
 364	if (tqspi->is_packed) {
 365		len = tqspi->curr_dma_words * tqspi->bytes_per_word;
 366		for (count = 0; count < rx_full_count; count++) {
 367			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);
 368
 369			for (i = 0; len && (i < 4); i++, len--)
 370				*rx_buf++ = (x >> i * 8) & 0xff;
 371		}
 372
 373		read_words += tqspi->curr_dma_words;
 374		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
 375	} else {
 376		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
 377		u8 bytes_per_word = tqspi->bytes_per_word;
 378		unsigned int read_bytes;
 379
 380		len = rx_full_count * bytes_per_word;
 381		if (len > t->len - tqspi->cur_pos)
 382			len = t->len - tqspi->cur_pos;
 383		read_bytes = len;
 384		for (count = 0; count < rx_full_count; count++) {
 385			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;
 386
 387			for (i = 0; len && (i < bytes_per_word); i++, len--)
 388				*rx_buf++ = (x >> (i * 8)) & 0xff;
 389		}
 390
 391		read_words += rx_full_count;
 392		tqspi->cur_rx_pos += read_bytes;
 393	}
 394
 395	return read_words;
 396}
 397
 398static void
 399tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
 400{
 401	dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
 402				tqspi->dma_buf_size, DMA_TO_DEVICE);
 403
 404	/*
 405	 * In packed mode, each word in FIFO may contain multiple packets
 406	 * based on bits per word. So all bytes in each FIFO word are valid.
 407	 *
 408	 * In unpacked mode, each word in FIFO contains single packet and
 409	 * based on bits per word any remaining bits in FIFO word will be
 410	 * ignored by the hardware and are invalid bits.
 411	 */
 412	if (tqspi->is_packed) {
 413		tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
 414	} else {
 415		u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
 416		unsigned int i, count, consume, write_bytes;
 417
 418		/*
 419		 * Fill tx_dma_buf to contain single packet in each word based
 420		 * on bits per word from SPI core tx_buf.
 421		 */
 422		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
 423		if (consume > t->len - tqspi->cur_pos)
 424			consume = t->len - tqspi->cur_pos;
 425		write_bytes = consume;
 426		for (count = 0; count < tqspi->curr_dma_words; count++) {
 427			u32 x = 0;
 428
 429			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
 430				x |= (u32)(*tx_buf++) << (i * 8);
 431			tqspi->tx_dma_buf[count] = x;
 432		}
 433
 434		tqspi->cur_tx_pos += write_bytes;
 435	}
 436
 437	dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
 438				   tqspi->dma_buf_size, DMA_TO_DEVICE);
 439}
 440
 441static void
 442tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
 443{
 444	dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
 445				tqspi->dma_buf_size, DMA_FROM_DEVICE);
 446
 447	if (tqspi->is_packed) {
 448		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
 449	} else {
 450		unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
 451		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
 452		unsigned int i, count, consume, read_bytes;
 453
 454		/*
 455		 * Each FIFO word contains single data packet.
 456		 * Skip invalid bits in each FIFO word based on bits per word
 457		 * and align bytes while filling in SPI core rx_buf.
 458		 */
 459		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
 460		if (consume > t->len - tqspi->cur_pos)
 461			consume = t->len - tqspi->cur_pos;
 462		read_bytes = consume;
 463		for (count = 0; count < tqspi->curr_dma_words; count++) {
 464			u32 x = tqspi->rx_dma_buf[count] & rx_mask;
 465
 466			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
 467				*rx_buf++ = (x >> (i * 8)) & 0xff;
 468		}
 469
 470		tqspi->cur_rx_pos += read_bytes;
 471	}
 472
 473	dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
 474				   tqspi->dma_buf_size, DMA_FROM_DEVICE);
 475}
 476
 477static void tegra_qspi_dma_complete(void *args)
 478{
 479	struct completion *dma_complete = args;
 480
 481	complete(dma_complete);
 482}
 483
 484static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
 485{
 486	dma_addr_t tx_dma_phys;
 487
 488	reinit_completion(&tqspi->tx_dma_complete);
 489
 490	if (tqspi->is_packed)
 491		tx_dma_phys = t->tx_dma;
 492	else
 493		tx_dma_phys = tqspi->tx_dma_phys;
 494
 495	tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
 496							 len, DMA_MEM_TO_DEV,
 497							 DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
 498
 499	if (!tqspi->tx_dma_desc) {
 500		dev_err(tqspi->dev, "Unable to get TX descriptor\n");
 501		return -EIO;
 502	}
 503
 504	tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
 505	tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
 506	dmaengine_submit(tqspi->tx_dma_desc);
 507	dma_async_issue_pending(tqspi->tx_dma_chan);
 508
 509	return 0;
 510}
 511
 512static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
 513{
 514	dma_addr_t rx_dma_phys;
 515
 516	reinit_completion(&tqspi->rx_dma_complete);
 517
 518	if (tqspi->is_packed)
 519		rx_dma_phys = t->rx_dma;
 520	else
 521		rx_dma_phys = tqspi->rx_dma_phys;
 522
 523	tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
 524							 len, DMA_DEV_TO_MEM,
 525							 DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
 526
 527	if (!tqspi->rx_dma_desc) {
 528		dev_err(tqspi->dev, "Unable to get RX descriptor\n");
 529		return -EIO;
 530	}
 531
 532	tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
 533	tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
 534	dmaengine_submit(tqspi->rx_dma_desc);
 535	dma_async_issue_pending(tqspi->rx_dma_chan);
 536
 537	return 0;
 538}
 539
 540static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
 541{
 542	void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
 543	u32 val;
 544
 545	val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 546	if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
 547		return 0;
 548
 549	val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
 550	tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);
 551
 552	if (!atomic)
 553		return readl_relaxed_poll_timeout(addr, val,
 554						  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
 555						  1000, 1000000);
 556
 557	return readl_relaxed_poll_timeout_atomic(addr, val,
 558						 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
 559						 1000, 1000000);
 560}
 561
 562static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
 563{
 564	u32 intr_mask;
 565
 566	intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
 567	intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
 568	tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
 569}
 570
 571static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
 572{
 573	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
 574	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
 575	unsigned int len;
 576
 577	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
 578
 579	if (t->tx_buf) {
 580		t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
 581		if (dma_mapping_error(tqspi->dev, t->tx_dma))
 582			return -ENOMEM;
 583	}
 584
 585	if (t->rx_buf) {
 586		t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
 587		if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
 588			dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
 589			return -ENOMEM;
 590		}
 591	}
 592
 593	return 0;
 594}
 595
 596static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
 597{
 598	unsigned int len;
 599
 600	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
 601
 602	dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
 603	dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
 604}
 605
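/*
 * The DMA path below programs the block count, picks TX/RX trigger levels
 * and a matching burst size from the transfer length, configures the slave
 * channels and only then sets QSPI_DMA_EN to kick off the transfer.
 */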
 606static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
 607{
 608	struct dma_slave_config dma_sconfig = { 0 };
 609	unsigned int len;
 610	u8 dma_burst;
 611	int ret = 0;
 612	u32 val;
 613
 614	if (tqspi->is_packed) {
 615		ret = tegra_qspi_dma_map_xfer(tqspi, t);
 616		if (ret < 0)
 617			return ret;
 618	}
 619
 620	val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
 621	tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
 622
 623	tegra_qspi_unmask_irq(tqspi);
 624
 625	if (tqspi->is_packed)
 626		len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
 627	else
 628		len = tqspi->curr_dma_words * 4;
 629
 630	/* set attention level based on length of transfer */
 631	val = 0;
 632	if (len & 0xf) {
 633		val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
 634		dma_burst = 1;
 635	} else if (((len) >> 4) & 0x1) {
 636		val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
 637		dma_burst = 4;
 638	} else {
 639		val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
 640		dma_burst = 8;
 641	}
 642
 643	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
 644	tqspi->dma_control_reg = val;
 645
 646	dma_sconfig.device_fc = true;
 647	if (tqspi->cur_direction & DATA_DIR_TX) {
 648		dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
 649		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 650		dma_sconfig.dst_maxburst = dma_burst;
 651		ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
 652		if (ret < 0) {
 653			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
 654			return ret;
 655		}
 656
 657		tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
 658		ret = tegra_qspi_start_tx_dma(tqspi, t, len);
 659		if (ret < 0) {
  660			dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
 661			return ret;
 662		}
 663	}
 664
 665	if (tqspi->cur_direction & DATA_DIR_RX) {
 666		dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
 667		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 668		dma_sconfig.src_maxburst = dma_burst;
 669		ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
 670		if (ret < 0) {
 671			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
 672			return ret;
 673		}
 674
 675		dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
 676					   tqspi->dma_buf_size,
 677					   DMA_FROM_DEVICE);
 678
 679		ret = tegra_qspi_start_rx_dma(tqspi, t, len);
 680		if (ret < 0) {
 681			dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
 682			if (tqspi->cur_direction & DATA_DIR_TX)
 683				dmaengine_terminate_all(tqspi->tx_dma_chan);
 684			return ret;
 685		}
 686	}
 687
 688	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
 689
 690	tqspi->is_curr_dma_xfer = true;
 691	tqspi->dma_control_reg = val;
 692	val |= QSPI_DMA_EN;
 693	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
 694
 695	return ret;
 696}
 697
 698static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
 699{
 700	u32 val;
 701	unsigned int cur_words;
 702
 703	if (qspi->cur_direction & DATA_DIR_TX)
 704		cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
 705	else
 706		cur_words = qspi->curr_dma_words;
 707
 708	val = QSPI_DMA_BLK_SET(cur_words - 1);
 709	tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);
 710
 711	tegra_qspi_unmask_irq(qspi);
 712
 713	qspi->is_curr_dma_xfer = false;
 714	val = qspi->command1_reg;
 715	val |= QSPI_PIO;
 716	tegra_qspi_writel(qspi, val, QSPI_COMMAND1);
 717
 718	return 0;
 719}
 720
 721static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
 722{
 723	if (!tqspi->soc_data->has_dma)
 724		return;
 725
 726	if (tqspi->tx_dma_buf) {
 727		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
 728				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
 729		tqspi->tx_dma_buf = NULL;
 730	}
 731
 732	if (tqspi->tx_dma_chan) {
 733		dma_release_channel(tqspi->tx_dma_chan);
 734		tqspi->tx_dma_chan = NULL;
 735	}
 736
 737	if (tqspi->rx_dma_buf) {
 738		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
 739				  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
 740		tqspi->rx_dma_buf = NULL;
 741	}
 742
 743	if (tqspi->rx_dma_chan) {
 744		dma_release_channel(tqspi->rx_dma_chan);
 745		tqspi->rx_dma_chan = NULL;
 746	}
 747}
 748
 749static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
 750{
 751	struct dma_chan *dma_chan;
 752	dma_addr_t dma_phys;
 753	u32 *dma_buf;
 754	int err;
 755
 756	if (!tqspi->soc_data->has_dma)
 757		return 0;
 758
 759	dma_chan = dma_request_chan(tqspi->dev, "rx");
 760	if (IS_ERR(dma_chan)) {
 761		err = PTR_ERR(dma_chan);
 762		goto err_out;
 763	}
 764
 765	tqspi->rx_dma_chan = dma_chan;
 766
 767	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 768	if (!dma_buf) {
 769		err = -ENOMEM;
 770		goto err_out;
 771	}
 772
 773	tqspi->rx_dma_buf = dma_buf;
 774	tqspi->rx_dma_phys = dma_phys;
 775
 776	dma_chan = dma_request_chan(tqspi->dev, "tx");
 777	if (IS_ERR(dma_chan)) {
 778		err = PTR_ERR(dma_chan);
 779		goto err_out;
 780	}
 781
 782	tqspi->tx_dma_chan = dma_chan;
 783
 784	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 785	if (!dma_buf) {
 786		err = -ENOMEM;
 787		goto err_out;
 788	}
 789
 790	tqspi->tx_dma_buf = dma_buf;
 791	tqspi->tx_dma_phys = dma_phys;
 792	tqspi->use_dma = true;
 793
 794	return 0;
 795
 796err_out:
 797	tegra_qspi_deinit_dma(tqspi);
 798
 799	if (err != -EPROBE_DEFER) {
 800		dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
 801		dev_err(tqspi->dev, "falling back to PIO\n");
 802		return 0;
 803	}
 804
 805	return err;
 806}
 807
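/*
 * Build the COMMAND1 value for a transfer: clock rate, chip select, bit
 * length and SPI mode, plus (on the first transfer of a message) the tap
 * delays programmed through COMMAND2.
 */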
 808static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
 809					 bool is_first_of_msg)
 810{
 811	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
 812	struct tegra_qspi_client_data *cdata = spi->controller_data;
 813	u32 command1, command2, speed = t->speed_hz;
 814	u8 bits_per_word = t->bits_per_word;
 815	u32 tx_tap = 0, rx_tap = 0;
 816	int req_mode;
 817
 818	if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
 819		clk_set_rate(tqspi->clk, speed);
 820		tqspi->cur_speed = speed;
 821	}
 822
 823	tqspi->cur_pos = 0;
 824	tqspi->cur_rx_pos = 0;
 825	tqspi->cur_tx_pos = 0;
 826	tqspi->curr_xfer = t;
 827
 828	if (is_first_of_msg) {
 829		tegra_qspi_mask_clear_irq(tqspi);
 830
 831		command1 = tqspi->def_command1_reg;
 832		command1 |= QSPI_CS_SEL(spi->chip_select);
 833		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
 834
 835		command1 &= ~QSPI_CONTROL_MODE_MASK;
 836		req_mode = spi->mode & 0x3;
 837		if (req_mode == SPI_MODE_3)
 838			command1 |= QSPI_CONTROL_MODE_3;
 839		else
 840			command1 |= QSPI_CONTROL_MODE_0;
 841
 842		if (spi->mode & SPI_CS_HIGH)
 843			command1 |= QSPI_CS_SW_VAL;
 844		else
 845			command1 &= ~QSPI_CS_SW_VAL;
 846		tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
 847
 848		if (cdata && cdata->tx_clk_tap_delay)
 849			tx_tap = cdata->tx_clk_tap_delay;
 850
 851		if (cdata && cdata->rx_clk_tap_delay)
 852			rx_tap = cdata->rx_clk_tap_delay;
 853
 854		command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
 855		if (command2 != tqspi->def_command2_reg)
 856			tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);
 857
 858	} else {
 859		command1 = tqspi->command1_reg;
 860		command1 &= ~QSPI_BIT_LENGTH(~0);
 861		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
 862	}
 863
 864	command1 &= ~QSPI_SDR_DDR_SEL;
 865
 866	return command1;
 867}
 868
 869static int tegra_qspi_start_transfer_one(struct spi_device *spi,
 870					 struct spi_transfer *t, u32 command1)
 871{
 872	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
 873	unsigned int total_fifo_words;
 874	u8 bus_width = 0;
 875	int ret;
 876
 877	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
 878
 879	command1 &= ~QSPI_PACKED;
 880	if (tqspi->is_packed)
 881		command1 |= QSPI_PACKED;
 882	tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
 883
 884	tqspi->cur_direction = 0;
 885
 886	command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
 887	if (t->rx_buf) {
 888		command1 |= QSPI_RX_EN;
 889		tqspi->cur_direction |= DATA_DIR_RX;
 890		bus_width = t->rx_nbits;
 891	}
 892
 893	if (t->tx_buf) {
 894		command1 |= QSPI_TX_EN;
 895		tqspi->cur_direction |= DATA_DIR_TX;
 896		bus_width = t->tx_nbits;
 897	}
 898
 899	command1 &= ~QSPI_INTERFACE_WIDTH_MASK;
 900
 901	if (bus_width == SPI_NBITS_QUAD)
 902		command1 |= QSPI_INTERFACE_WIDTH_QUAD;
 903	else if (bus_width == SPI_NBITS_DUAL)
 904		command1 |= QSPI_INTERFACE_WIDTH_DUAL;
 905	else
 906		command1 |= QSPI_INTERFACE_WIDTH_SINGLE;
 907
 908	tqspi->command1_reg = command1;
 909
 910	tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);
 911
 912	ret = tegra_qspi_flush_fifos(tqspi, false);
 913	if (ret < 0)
 914		return ret;
 915
 916	if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
 917		ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
 918	else
 919		ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);
 920
 921	return ret;
 922}
 923
 924static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
 925{
 926	struct tegra_qspi_client_data *cdata;
 927	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
 928
 929	cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
 930	if (!cdata)
 931		return NULL;
 932
 933	device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
 934				 &cdata->tx_clk_tap_delay);
 935	device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
 936				 &cdata->rx_clk_tap_delay);
 937
 938	return cdata;
 939}
 940
 941static int tegra_qspi_setup(struct spi_device *spi)
 942{
 943	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
 944	struct tegra_qspi_client_data *cdata = spi->controller_data;
 945	unsigned long flags;
 946	u32 val;
 947	int ret;
 948
 949	ret = pm_runtime_resume_and_get(tqspi->dev);
 950	if (ret < 0) {
 951		dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
 952		return ret;
 953	}
 954
 955	if (!cdata) {
 956		cdata = tegra_qspi_parse_cdata_dt(spi);
 957		spi->controller_data = cdata;
 958	}
 959	spin_lock_irqsave(&tqspi->lock, flags);
 960
 961	/* keep default cs state to inactive */
 962	val = tqspi->def_command1_reg;
 963	val |= QSPI_CS_SEL(spi->chip_select);
 964	if (spi->mode & SPI_CS_HIGH)
 965		val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select);
 966	else
 967		val |= QSPI_CS_POL_INACTIVE(spi->chip_select);
 968
 969	tqspi->def_command1_reg = val;
 970	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
 971
 972	spin_unlock_irqrestore(&tqspi->lock, flags);
 973
 974	pm_runtime_put(tqspi->dev);
 975
 976	return 0;
 977}
 978
 979static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
 980{
 981	dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
 982	dev_dbg(tqspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
 983		tegra_qspi_readl(tqspi, QSPI_COMMAND1),
 984		tegra_qspi_readl(tqspi, QSPI_COMMAND2));
 985	dev_dbg(tqspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
 986		tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
 987		tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
 988	dev_dbg(tqspi->dev, "INTR_MASK:  0x%08x | MISC: 0x%08x\n",
 989		tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
 990		tegra_qspi_readl(tqspi, QSPI_MISC_REG));
 991	dev_dbg(tqspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
 992		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
 993		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
 994}
 995
 996static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
 997{
 998	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
 999	tegra_qspi_dump_regs(tqspi);
1000	tegra_qspi_flush_fifos(tqspi, true);
1001	if (device_reset(tqspi->dev) < 0)
1002		dev_warn_once(tqspi->dev, "device reset failed\n");
1003}
1004
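/*
 * Drive the software-controlled chip select back to its inactive level
 * (honouring SPI_CS_HIGH) and then restore the default COMMAND1 value.
 */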
1005static void tegra_qspi_transfer_end(struct spi_device *spi)
1006{
1007	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
1008	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
1009
1010	if (cs_val)
1011		tqspi->command1_reg |= QSPI_CS_SW_VAL;
1012	else
1013		tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
1014	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
1015	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
1016}
1017
1018static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
1019{
1020	u32 cmd_config = 0;
1021
 1022	/* Build the command phase configuration */
1023	if (is_ddr)
1024		cmd_config |= QSPI_COMMAND_SDR_DDR;
1025	else
1026		cmd_config &= ~QSPI_COMMAND_SDR_DDR;
1027
1028	cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
1029	cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
1030
1031	return cmd_config;
1032}
1033
1034static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
1035{
1036	u32 addr_config = 0;
1037
 1038	/* Build the address phase configuration */
 1039	is_ddr = 0;		/* only SDR mode is supported */
 1040	bus_width = 0;		/* X1 mode */
1041
1042	if (is_ddr)
1043		addr_config |= QSPI_ADDRESS_SDR_DDR;
1044	else
1045		addr_config &= ~QSPI_ADDRESS_SDR_DDR;
1046
1047	addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
1048	addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
1049
1050	return addr_config;
1051}
1052
1053static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
1054					struct spi_message *msg)
1055{
1056	bool is_first_msg = true;
1057	struct spi_transfer *xfer;
1058	struct spi_device *spi = msg->spi;
1059	u8 transfer_phase = 0;
1060	u32 cmd1 = 0, dma_ctl = 0;
1061	int ret = 0;
1062	u32 address_value = 0;
1063	u32 cmd_config = 0, addr_config = 0;
1064	u8 cmd_value = 0, val = 0;
1065
1066	/* Enable Combined sequence mode */
1067	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
1068	val |= QSPI_CMB_SEQ_EN;
1069	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
1070	/* Process individual transfer list */
1071	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1072		switch (transfer_phase) {
1073		case CMD_TRANSFER:
1074			/* X1 SDR mode */
1075			cmd_config = tegra_qspi_cmd_config(false, 0,
1076							   xfer->len);
1077			cmd_value = *((const u8 *)(xfer->tx_buf));
1078			break;
1079		case ADDR_TRANSFER:
1080			/* X1 SDR mode */
1081			addr_config = tegra_qspi_addr_config(false, 0,
1082							     xfer->len);
1083			address_value = *((const u32 *)(xfer->tx_buf));
1084			break;
1085		case DATA_TRANSFER:
1086			/* Program Command, Address value in register */
1087			tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
1088			tegra_qspi_writel(tqspi, address_value,
1089					  QSPI_CMB_SEQ_ADDR);
1090			/* Program Command and Address config in register */
1091			tegra_qspi_writel(tqspi, cmd_config,
1092					  QSPI_CMB_SEQ_CMD_CFG);
1093			tegra_qspi_writel(tqspi, addr_config,
1094					  QSPI_CMB_SEQ_ADDR_CFG);
1095
1096			reinit_completion(&tqspi->xfer_completion);
1097			cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
1098							     is_first_msg);
1099			ret = tegra_qspi_start_transfer_one(spi, xfer,
1100							    cmd1);
1101
1102			if (ret < 0) {
1103				dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
1104					ret);
1105				return ret;
1106			}
1107
1108			is_first_msg = false;
1109			ret = wait_for_completion_timeout
1110					(&tqspi->xfer_completion,
1111					QSPI_DMA_TIMEOUT);
1112
1113			if (WARN_ON(ret == 0)) {
1114				dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
1115					ret);
1116				if (tqspi->is_curr_dma_xfer &&
1117				    (tqspi->cur_direction & DATA_DIR_TX))
1118					dmaengine_terminate_all
1119						(tqspi->tx_dma_chan);
1120
1121				if (tqspi->is_curr_dma_xfer &&
1122				    (tqspi->cur_direction & DATA_DIR_RX))
1123					dmaengine_terminate_all
1124						(tqspi->rx_dma_chan);
1125
1126				/* Abort transfer by resetting pio/dma bit */
1127				if (!tqspi->is_curr_dma_xfer) {
1128					cmd1 = tegra_qspi_readl
1129							(tqspi,
1130							 QSPI_COMMAND1);
1131					cmd1 &= ~QSPI_PIO;
1132					tegra_qspi_writel
1133							(tqspi, cmd1,
1134							 QSPI_COMMAND1);
1135				} else {
1136					dma_ctl = tegra_qspi_readl
1137							(tqspi,
1138							 QSPI_DMA_CTL);
1139					dma_ctl &= ~QSPI_DMA_EN;
1140					tegra_qspi_writel(tqspi, dma_ctl,
1141							  QSPI_DMA_CTL);
1142				}
1143
1144				/* Reset controller if timeout happens */
1145				if (device_reset(tqspi->dev) < 0)
1146					dev_warn_once(tqspi->dev,
1147						      "device reset failed\n");
1148				ret = -EIO;
1149				goto exit;
1150			}
1151
1152			if (tqspi->tx_status ||  tqspi->rx_status) {
1153				dev_err(tqspi->dev, "QSPI Transfer failed\n");
1154				tqspi->tx_status = 0;
1155				tqspi->rx_status = 0;
1156				ret = -EIO;
1157				goto exit;
1158			}
1159			break;
1160		default:
1161			ret = -EINVAL;
1162			goto exit;
1163		}
1164		msg->actual_length += xfer->len;
1165		transfer_phase++;
1166	}
1167	if (!xfer->cs_change) {
1168		tegra_qspi_transfer_end(spi);
1169		spi_transfer_delay_exec(xfer);
1170	}
1171	ret = 0;
1172
1173exit:
1174	msg->status = ret;
1175
1176	return ret;
1177}
1178
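/*
 * Fallback path used when the combined sequence cannot be applied: each
 * transfer is programmed individually, with a trailing dummy-data transfer
 * folded into the preceding transfer as dummy clock cycles when possible.
 */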
1179static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
1180					    struct spi_message *msg)
1181{
1182	struct spi_device *spi = msg->spi;
1183	struct spi_transfer *transfer;
1184	bool is_first_msg = true;
1185	int ret = 0, val = 0;
1186
1187	msg->status = 0;
1188	msg->actual_length = 0;
1189	tqspi->tx_status = 0;
1190	tqspi->rx_status = 0;
1191
1192	/* Disable Combined sequence mode */
1193	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
1194	val &= ~QSPI_CMB_SEQ_EN;
1195	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
1196	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
1197		struct spi_transfer *xfer = transfer;
1198		u8 dummy_bytes = 0;
1199		u32 cmd1;
1200
1201		tqspi->dummy_cycles = 0;
1202		/*
1203		 * Tegra QSPI hardware supports dummy bytes transfer after actual transfer
1204		 * bytes based on programmed dummy clock cycles in the QSPI_MISC register.
1205		 * So, check if the next transfer is dummy data transfer and program dummy
1206		 * clock cycles along with the current transfer and skip next transfer.
1207		 */
1208		if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
1209			struct spi_transfer *next_xfer;
1210
1211			next_xfer = list_next_entry(xfer, transfer_list);
1212			if (next_xfer->dummy_data) {
1213				u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;
1214
1215				if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
1216					tqspi->dummy_cycles = dummy_cycles;
1217					dummy_bytes = next_xfer->len;
1218					transfer = next_xfer;
1219				}
1220			}
1221		}
1222
1223		reinit_completion(&tqspi->xfer_completion);
1224
1225		cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
1226
1227		ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
1228		if (ret < 0) {
1229			dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
1230			goto complete_xfer;
1231		}
1232
1233		ret = wait_for_completion_timeout(&tqspi->xfer_completion,
1234						  QSPI_DMA_TIMEOUT);
1235		if (WARN_ON(ret == 0)) {
1236			dev_err(tqspi->dev, "transfer timeout\n");
1237			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
1238				dmaengine_terminate_all(tqspi->tx_dma_chan);
1239			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
1240				dmaengine_terminate_all(tqspi->rx_dma_chan);
1241			tegra_qspi_handle_error(tqspi);
1242			ret = -EIO;
1243			goto complete_xfer;
1244		}
1245
1246		if (tqspi->tx_status ||  tqspi->rx_status) {
1247			tegra_qspi_handle_error(tqspi);
1248			ret = -EIO;
1249			goto complete_xfer;
1250		}
1251
1252		msg->actual_length += xfer->len + dummy_bytes;
1253
1254complete_xfer:
1255		if (ret < 0) {
1256			tegra_qspi_transfer_end(spi);
1257			spi_transfer_delay_exec(xfer);
1258			goto exit;
1259		}
1260
1261		if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
1262			/* de-activate CS after last transfer only when cs_change is not set */
1263			if (!xfer->cs_change) {
1264				tegra_qspi_transfer_end(spi);
1265				spi_transfer_delay_exec(xfer);
1266			}
1267		} else if (xfer->cs_change) {
 1268			/* de-activate CS between the transfers only when cs_change is set */
1269			tegra_qspi_transfer_end(spi);
1270			spi_transfer_delay_exec(xfer);
1271		}
1272	}
1273
1274	ret = 0;
1275exit:
1276	msg->status = ret;
1277
1278	return ret;
1279}
1280
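/*
 * A message is eligible for the combined sequence only when it is the usual
 * flash layout of exactly three transfers: an opcode of at most 2 bytes, an
 * address of 3 or 4 bytes and a data phase that fits the DMA/FIFO limits
 * checked below.
 */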
1281static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
1282					struct spi_message *msg)
1283{
1284	int transfer_count = 0;
1285	struct spi_transfer *xfer;
1286
1287	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1288		transfer_count++;
1289	}
1290	if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
1291		return false;
1292	xfer = list_first_entry(&msg->transfers, typeof(*xfer),
1293				transfer_list);
1294	if (xfer->len > 2)
1295		return false;
1296	xfer = list_next_entry(xfer, transfer_list);
1297	if (xfer->len > 4 || xfer->len < 3)
1298		return false;
1299	xfer = list_next_entry(xfer, transfer_list);
1300	if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
1301		return false;
1302
1303	return true;
1304}
1305
1306static int tegra_qspi_transfer_one_message(struct spi_master *master,
1307					   struct spi_message *msg)
1308{
1309	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1310	int ret;
1311
1312	if (tegra_qspi_validate_cmb_seq(tqspi, msg))
1313		ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
1314	else
1315		ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
1316
1317	spi_finalize_current_message(master);
1318
1319	return ret;
1320}
1321
1322static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
1323{
1324	struct spi_transfer *t = tqspi->curr_xfer;
1325	unsigned long flags;
1326
1327	spin_lock_irqsave(&tqspi->lock, flags);
1328
1329	if (tqspi->tx_status ||  tqspi->rx_status) {
1330		tegra_qspi_handle_error(tqspi);
1331		complete(&tqspi->xfer_completion);
1332		goto exit;
1333	}
1334
1335	if (tqspi->cur_direction & DATA_DIR_RX)
1336		tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
1337
1338	if (tqspi->cur_direction & DATA_DIR_TX)
1339		tqspi->cur_pos = tqspi->cur_tx_pos;
1340	else
1341		tqspi->cur_pos = tqspi->cur_rx_pos;
1342
1343	if (tqspi->cur_pos == t->len) {
1344		complete(&tqspi->xfer_completion);
1345		goto exit;
1346	}
1347
1348	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1349	tegra_qspi_start_cpu_based_transfer(tqspi, t);
1350exit:
1351	spin_unlock_irqrestore(&tqspi->lock, flags);
1352	return IRQ_HANDLED;
1353}
1354
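/*
 * DMA completion path: wait for the TX/RX DMA completions (err encodes a
 * TX failure as 1 and an RX failure as 2), copy unpacked RX data back to
 * the client buffer and either finish the transfer or start the next chunk.
 */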
1355static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
1356{
1357	struct spi_transfer *t = tqspi->curr_xfer;
1358	unsigned int total_fifo_words;
1359	unsigned long flags;
1360	long wait_status;
1361	int err = 0;
1362
1363	if (tqspi->cur_direction & DATA_DIR_TX) {
1364		if (tqspi->tx_status) {
1365			dmaengine_terminate_all(tqspi->tx_dma_chan);
1366			err += 1;
1367		} else {
1368			wait_status = wait_for_completion_interruptible_timeout(
1369				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
1370			if (wait_status <= 0) {
1371				dmaengine_terminate_all(tqspi->tx_dma_chan);
1372				dev_err(tqspi->dev, "failed TX DMA transfer\n");
1373				err += 1;
1374			}
1375		}
1376	}
1377
1378	if (tqspi->cur_direction & DATA_DIR_RX) {
1379		if (tqspi->rx_status) {
1380			dmaengine_terminate_all(tqspi->rx_dma_chan);
1381			err += 2;
1382		} else {
1383			wait_status = wait_for_completion_interruptible_timeout(
1384				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
1385			if (wait_status <= 0) {
1386				dmaengine_terminate_all(tqspi->rx_dma_chan);
1387				dev_err(tqspi->dev, "failed RX DMA transfer\n");
1388				err += 2;
1389			}
1390		}
1391	}
1392
1393	spin_lock_irqsave(&tqspi->lock, flags);
1394
1395	if (err) {
1396		tegra_qspi_dma_unmap_xfer(tqspi, t);
1397		tegra_qspi_handle_error(tqspi);
1398		complete(&tqspi->xfer_completion);
1399		goto exit;
1400	}
1401
1402	if (tqspi->cur_direction & DATA_DIR_RX)
1403		tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
1404
1405	if (tqspi->cur_direction & DATA_DIR_TX)
1406		tqspi->cur_pos = tqspi->cur_tx_pos;
1407	else
1408		tqspi->cur_pos = tqspi->cur_rx_pos;
1409
1410	if (tqspi->cur_pos == t->len) {
1411		tegra_qspi_dma_unmap_xfer(tqspi, t);
1412		complete(&tqspi->xfer_completion);
1413		goto exit;
1414	}
1415
1416	tegra_qspi_dma_unmap_xfer(tqspi, t);
1417
1418	/* continue transfer in current message */
1419	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1420	if (total_fifo_words > QSPI_FIFO_DEPTH)
1421		err = tegra_qspi_start_dma_based_transfer(tqspi, t);
1422	else
1423		err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
1424
1425exit:
1426	spin_unlock_irqrestore(&tqspi->lock, flags);
1427	return IRQ_HANDLED;
1428}
1429
1430static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
1431{
1432	struct tegra_qspi *tqspi = context_data;
1433
1434	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
1435
1436	if (tqspi->cur_direction & DATA_DIR_TX)
1437		tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
1438
1439	if (tqspi->cur_direction & DATA_DIR_RX)
1440		tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
1441
1442	tegra_qspi_mask_clear_irq(tqspi);
1443
1444	if (!tqspi->is_curr_dma_xfer)
1445		return handle_cpu_based_xfer(tqspi);
1446
1447	return handle_dma_based_xfer(tqspi);
1448}
1449
1450static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
1451	.has_dma = true,
1452	.cmb_xfer_capable = false,
1453	.cs_count = 1,
1454};
1455
1456static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
1457	.has_dma = true,
1458	.cmb_xfer_capable = true,
1459	.cs_count = 1,
1460};
1461
1462static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
1463	.has_dma = false,
1464	.cmb_xfer_capable = true,
1465	.cs_count = 1,
1466};
1467
1468static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
1469	.has_dma = false,
1470	.cmb_xfer_capable = true,
1471	.cs_count = 4,
1472};
1473
1474static const struct of_device_id tegra_qspi_of_match[] = {
1475	{
1476		.compatible = "nvidia,tegra210-qspi",
1477		.data	    = &tegra210_qspi_soc_data,
1478	}, {
1479		.compatible = "nvidia,tegra186-qspi",
1480		.data	    = &tegra186_qspi_soc_data,
1481	}, {
1482		.compatible = "nvidia,tegra194-qspi",
1483		.data	    = &tegra186_qspi_soc_data,
1484	}, {
1485		.compatible = "nvidia,tegra234-qspi",
1486		.data	    = &tegra234_qspi_soc_data,
1487	}, {
1488		.compatible = "nvidia,tegra241-qspi",
1489		.data	    = &tegra241_qspi_soc_data,
1490	},
1491	{}
1492};
1493
1494MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
1495
1496#ifdef CONFIG_ACPI
1497static const struct acpi_device_id tegra_qspi_acpi_match[] = {
1498	{
1499		.id = "NVDA1213",
1500		.driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
1501	}, {
1502		.id = "NVDA1313",
1503		.driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
1504	}, {
1505		.id = "NVDA1413",
1506		.driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
1507	}, {
1508		.id = "NVDA1513",
1509		.driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
1510	},
1511	{}
1512};
1513
1514MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
1515#endif
1516
1517static int tegra_qspi_probe(struct platform_device *pdev)
1518{
1519	struct spi_master	*master;
1520	struct tegra_qspi	*tqspi;
1521	struct resource		*r;
1522	int ret, qspi_irq;
1523	int bus_num;
1524
1525	master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
1526	if (!master)
1527		return -ENOMEM;
1528
1529	platform_set_drvdata(pdev, master);
1530	tqspi = spi_master_get_devdata(master);
1531
1532	master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
1533			    SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
1534	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
1535	master->setup = tegra_qspi_setup;
1536	master->transfer_one_message = tegra_qspi_transfer_one_message;
1537	master->num_chipselect = 1;
1538	master->auto_runtime_pm = true;
1539
1540	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
1541	if (bus_num >= 0)
1542		master->bus_num = bus_num;
1543
1544	tqspi->master = master;
1545	tqspi->dev = &pdev->dev;
1546	spin_lock_init(&tqspi->lock);
1547
1548	tqspi->soc_data = device_get_match_data(&pdev->dev);
1549	master->num_chipselect = tqspi->soc_data->cs_count;
1550	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1551	tqspi->base = devm_ioremap_resource(&pdev->dev, r);
1552	if (IS_ERR(tqspi->base))
1553		return PTR_ERR(tqspi->base);
1554
1555	tqspi->phys = r->start;
1556	qspi_irq = platform_get_irq(pdev, 0);
1557	if (qspi_irq < 0)
1558		return qspi_irq;
1559	tqspi->irq = qspi_irq;
1560
1561	if (!has_acpi_companion(tqspi->dev)) {
1562		tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
1563		if (IS_ERR(tqspi->clk)) {
1564			ret = PTR_ERR(tqspi->clk);
1565			dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
1566			return ret;
1567		}
1568
1569	}
1570
1571	tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
1572	tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
1573
1574	ret = tegra_qspi_init_dma(tqspi);
1575	if (ret < 0)
1576		return ret;
1577
1578	if (tqspi->use_dma)
1579		tqspi->max_buf_size = tqspi->dma_buf_size;
1580
1581	init_completion(&tqspi->tx_dma_complete);
1582	init_completion(&tqspi->rx_dma_complete);
1583	init_completion(&tqspi->xfer_completion);
1584
1585	pm_runtime_enable(&pdev->dev);
1586	ret = pm_runtime_resume_and_get(&pdev->dev);
1587	if (ret < 0) {
1588		dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
1589		goto exit_pm_disable;
1590	}
1591
1592	if (device_reset(tqspi->dev) < 0)
1593		dev_warn_once(tqspi->dev, "device reset failed\n");
1594
1595	tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW |  QSPI_CS_SW_VAL;
1596	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
1597	tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
1598	tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
1599	tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
1600
1601	pm_runtime_put(&pdev->dev);
1602
1603	ret = request_threaded_irq(tqspi->irq, NULL,
1604				   tegra_qspi_isr_thread, IRQF_ONESHOT,
1605				   dev_name(&pdev->dev), tqspi);
1606	if (ret < 0) {
1607		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
1608		goto exit_pm_disable;
1609	}
1610
1611	master->dev.of_node = pdev->dev.of_node;
1612	ret = spi_register_master(master);
1613	if (ret < 0) {
1614		dev_err(&pdev->dev, "failed to register master: %d\n", ret);
1615		goto exit_free_irq;
1616	}
1617
1618	return 0;
1619
1620exit_free_irq:
1621	free_irq(qspi_irq, tqspi);
1622exit_pm_disable:
1623	pm_runtime_force_suspend(&pdev->dev);
1624	tegra_qspi_deinit_dma(tqspi);
1625	return ret;
1626}
1627
1628static int tegra_qspi_remove(struct platform_device *pdev)
1629{
1630	struct spi_master *master = platform_get_drvdata(pdev);
1631	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1632
1633	spi_unregister_master(master);
1634	free_irq(tqspi->irq, tqspi);
1635	pm_runtime_force_suspend(&pdev->dev);
1636	tegra_qspi_deinit_dma(tqspi);
1637
1638	return 0;
1639}
1640
1641static int __maybe_unused tegra_qspi_suspend(struct device *dev)
1642{
1643	struct spi_master *master = dev_get_drvdata(dev);
1644
1645	return spi_master_suspend(master);
1646}
1647
1648static int __maybe_unused tegra_qspi_resume(struct device *dev)
1649{
1650	struct spi_master *master = dev_get_drvdata(dev);
1651	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1652	int ret;
1653
1654	ret = pm_runtime_resume_and_get(dev);
1655	if (ret < 0) {
1656		dev_err(dev, "failed to get runtime PM: %d\n", ret);
1657		return ret;
1658	}
1659
1660	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
1661	tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
1662	pm_runtime_put(dev);
1663
1664	return spi_master_resume(master);
1665}
1666
1667static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
1668{
1669	struct spi_master *master = dev_get_drvdata(dev);
1670	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1671
1672	/* Runtime pm disabled with ACPI */
1673	if (has_acpi_companion(tqspi->dev))
1674		return 0;
 1675	/* flush all writes which are in the PPSB queue by reading back */
1676	tegra_qspi_readl(tqspi, QSPI_COMMAND1);
1677
1678	clk_disable_unprepare(tqspi->clk);
1679
1680	return 0;
1681}
1682
1683static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
1684{
1685	struct spi_master *master = dev_get_drvdata(dev);
1686	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
1687	int ret;
1688
1689	/* Runtime pm disabled with ACPI */
1690	if (has_acpi_companion(tqspi->dev))
1691		return 0;
1692	ret = clk_prepare_enable(tqspi->clk);
1693	if (ret < 0)
1694		dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
1695
1696	return ret;
1697}
1698
1699static const struct dev_pm_ops tegra_qspi_pm_ops = {
1700	SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
1701	SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
1702};
1703
1704static struct platform_driver tegra_qspi_driver = {
1705	.driver = {
1706		.name		= "tegra-qspi",
1707		.pm		= &tegra_qspi_pm_ops,
1708		.of_match_table	= tegra_qspi_of_match,
1709		.acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
1710	},
1711	.probe =	tegra_qspi_probe,
1712	.remove =	tegra_qspi_remove,
1713};
1714module_platform_driver(tegra_qspi_driver);
1715
1716MODULE_ALIAS("platform:qspi-tegra");
1717MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
1718MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
1719MODULE_LICENSE("GPL v2");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2//
   3// Copyright (C) 2020 NVIDIA CORPORATION.
   4
   5#include <linux/clk.h>
   6#include <linux/completion.h>
   7#include <linux/delay.h>
   8#include <linux/dmaengine.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/dmapool.h>
  11#include <linux/err.h>
  12#include <linux/interrupt.h>
  13#include <linux/io.h>
  14#include <linux/iopoll.h>
  15#include <linux/kernel.h>
  16#include <linux/kthread.h>
  17#include <linux/module.h>
  18#include <linux/platform_device.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/of.h>
 
  21#include <linux/reset.h>
  22#include <linux/spi/spi.h>
  23#include <linux/acpi.h>
  24#include <linux/property.h>
  25
  26#define QSPI_COMMAND1				0x000
  27#define QSPI_BIT_LENGTH(x)			(((x) & 0x1f) << 0)
  28#define QSPI_PACKED				BIT(5)
  29#define QSPI_INTERFACE_WIDTH_MASK		(0x03 << 7)
  30#define QSPI_INTERFACE_WIDTH(x)			(((x) & 0x03) << 7)
  31#define QSPI_INTERFACE_WIDTH_SINGLE		QSPI_INTERFACE_WIDTH(0)
  32#define QSPI_INTERFACE_WIDTH_DUAL		QSPI_INTERFACE_WIDTH(1)
  33#define QSPI_INTERFACE_WIDTH_QUAD		QSPI_INTERFACE_WIDTH(2)
  34#define QSPI_SDR_DDR_SEL			BIT(9)
  35#define QSPI_TX_EN				BIT(11)
  36#define QSPI_RX_EN				BIT(12)
  37#define QSPI_CS_SW_VAL				BIT(20)
  38#define QSPI_CS_SW_HW				BIT(21)
  39
  40#define QSPI_CS_POL_INACTIVE(n)			(1 << (22 + (n)))
  41#define QSPI_CS_POL_INACTIVE_MASK		(0xF << 22)
  42#define QSPI_CS_SEL_0				(0 << 26)
  43#define QSPI_CS_SEL_1				(1 << 26)
  44#define QSPI_CS_SEL_2				(2 << 26)
  45#define QSPI_CS_SEL_3				(3 << 26)
  46#define QSPI_CS_SEL_MASK			(3 << 26)
  47#define QSPI_CS_SEL(x)				(((x) & 0x3) << 26)
  48
  49#define QSPI_CONTROL_MODE_0			(0 << 28)
  50#define QSPI_CONTROL_MODE_3			(3 << 28)
  51#define QSPI_CONTROL_MODE_MASK			(3 << 28)
  52#define QSPI_M_S				BIT(30)
  53#define QSPI_PIO				BIT(31)
  54
  55#define QSPI_COMMAND2				0x004
  56#define QSPI_TX_TAP_DELAY(x)			(((x) & 0x3f) << 10)
  57#define QSPI_RX_TAP_DELAY(x)			(((x) & 0xff) << 0)
  58
  59#define QSPI_CS_TIMING1				0x008
  60#define QSPI_SETUP_HOLD(setup, hold)		(((setup) << 4) | (hold))
  61
  62#define QSPI_CS_TIMING2				0x00c
  63#define CYCLES_BETWEEN_PACKETS_0(x)		(((x) & 0x1f) << 0)
  64#define CS_ACTIVE_BETWEEN_PACKETS_0		BIT(5)
  65
  66#define QSPI_TRANS_STATUS			0x010
  67#define QSPI_BLK_CNT(val)			(((val) >> 0) & 0xffff)
  68#define QSPI_RDY				BIT(30)
  69
  70#define QSPI_FIFO_STATUS			0x014
  71#define QSPI_RX_FIFO_EMPTY			BIT(0)
  72#define QSPI_RX_FIFO_FULL			BIT(1)
  73#define QSPI_TX_FIFO_EMPTY			BIT(2)
  74#define QSPI_TX_FIFO_FULL			BIT(3)
  75#define QSPI_RX_FIFO_UNF			BIT(4)
  76#define QSPI_RX_FIFO_OVF			BIT(5)
  77#define QSPI_TX_FIFO_UNF			BIT(6)
  78#define QSPI_TX_FIFO_OVF			BIT(7)
  79#define QSPI_ERR				BIT(8)
  80#define QSPI_TX_FIFO_FLUSH			BIT(14)
  81#define QSPI_RX_FIFO_FLUSH			BIT(15)
  82#define QSPI_TX_FIFO_EMPTY_COUNT(val)		(((val) >> 16) & 0x7f)
  83#define QSPI_RX_FIFO_FULL_COUNT(val)		(((val) >> 23) & 0x7f)
  84
  85#define QSPI_FIFO_ERROR				(QSPI_RX_FIFO_UNF | \
  86						 QSPI_RX_FIFO_OVF | \
  87						 QSPI_TX_FIFO_UNF | \
  88						 QSPI_TX_FIFO_OVF)
  89#define QSPI_FIFO_EMPTY				(QSPI_RX_FIFO_EMPTY | \
  90						 QSPI_TX_FIFO_EMPTY)
  91
  92#define QSPI_TX_DATA				0x018
  93#define QSPI_RX_DATA				0x01c
  94
  95#define QSPI_DMA_CTL				0x020
  96#define QSPI_TX_TRIG(n)				(((n) & 0x3) << 15)
  97#define QSPI_TX_TRIG_1				QSPI_TX_TRIG(0)
  98#define QSPI_TX_TRIG_4				QSPI_TX_TRIG(1)
  99#define QSPI_TX_TRIG_8				QSPI_TX_TRIG(2)
 100#define QSPI_TX_TRIG_16				QSPI_TX_TRIG(3)
 101
 102#define QSPI_RX_TRIG(n)				(((n) & 0x3) << 19)
 103#define QSPI_RX_TRIG_1				QSPI_RX_TRIG(0)
 104#define QSPI_RX_TRIG_4				QSPI_RX_TRIG(1)
 105#define QSPI_RX_TRIG_8				QSPI_RX_TRIG(2)
 106#define QSPI_RX_TRIG_16				QSPI_RX_TRIG(3)
 107
 108#define QSPI_DMA_EN				BIT(31)
 109
 110#define QSPI_DMA_BLK				0x024
 111#define QSPI_DMA_BLK_SET(x)			(((x) & 0xffff) << 0)
 112
 113#define QSPI_TX_FIFO				0x108
 114#define QSPI_RX_FIFO				0x188
 115
 116#define QSPI_FIFO_DEPTH				64
 117
 118#define QSPI_INTR_MASK				0x18c
 119#define QSPI_INTR_RX_FIFO_UNF_MASK		BIT(25)
 120#define QSPI_INTR_RX_FIFO_OVF_MASK		BIT(26)
 121#define QSPI_INTR_TX_FIFO_UNF_MASK		BIT(27)
 122#define QSPI_INTR_TX_FIFO_OVF_MASK		BIT(28)
 123#define QSPI_INTR_RDY_MASK			BIT(29)
 124#define QSPI_INTR_RX_TX_FIFO_ERR		(QSPI_INTR_RX_FIFO_UNF_MASK | \
 125						 QSPI_INTR_RX_FIFO_OVF_MASK | \
 126						 QSPI_INTR_TX_FIFO_UNF_MASK | \
 127						 QSPI_INTR_TX_FIFO_OVF_MASK)
 128
 129#define QSPI_MISC_REG                           0x194
 130#define QSPI_NUM_DUMMY_CYCLE(x)			(((x) & 0xff) << 0)
 131#define QSPI_DUMMY_CYCLES_MAX			0xff
 132
 133#define QSPI_CMB_SEQ_CMD			0x19c
 134#define QSPI_COMMAND_VALUE_SET(x)		(((x) & 0xFF) << 0)
 135
 136#define QSPI_CMB_SEQ_CMD_CFG			0x1a0
 137#define QSPI_COMMAND_X1_X2_X4(x)		(((x) & 0x3) << 13)
 138#define QSPI_COMMAND_X1_X2_X4_MASK		(0x03 << 13)
 139#define QSPI_COMMAND_SDR_DDR			BIT(12)
 140#define QSPI_COMMAND_SIZE_SET(x)		(((x) & 0xFF) << 0)
 141
 142#define QSPI_GLOBAL_CONFIG			0x1a4
 143#define QSPI_CMB_SEQ_EN				BIT(0)
 144#define QSPI_TPM_WAIT_POLL_EN			BIT(1)
 145
 146#define QSPI_CMB_SEQ_ADDR			0x1a8
 147#define QSPI_ADDRESS_VALUE_SET(x)		(((x) & 0xFFFF) << 0)
 148
 149#define QSPI_CMB_SEQ_ADDR_CFG			0x1ac
 150#define QSPI_ADDRESS_X1_X2_X4(x)		(((x) & 0x3) << 13)
 151#define QSPI_ADDRESS_X1_X2_X4_MASK		(0x03 << 13)
 152#define QSPI_ADDRESS_SDR_DDR			BIT(12)
 153#define QSPI_ADDRESS_SIZE_SET(x)		(((x) & 0xFF) << 0)
 154
 155#define DATA_DIR_TX				BIT(0)
 156#define DATA_DIR_RX				BIT(1)
 157
 158#define QSPI_DMA_TIMEOUT			(msecs_to_jiffies(1000))
 159#define DEFAULT_QSPI_DMA_BUF_LEN		(64 * 1024)
 160#define CMD_TRANSFER				0
 161#define ADDR_TRANSFER				1
 162#define DATA_TRANSFER				2
 163
 164struct tegra_qspi_soc_data {
 165	bool has_dma;
 166	bool cmb_xfer_capable;
 167	bool supports_tpm;
 168	unsigned int cs_count;
 169};
 170
 171struct tegra_qspi_client_data {
 172	int tx_clk_tap_delay;
 173	int rx_clk_tap_delay;
 174};
 175
 176struct tegra_qspi {
 177	struct device				*dev;
 178	struct spi_controller			*host;
 179	/* lock to protect data accessed by irq */
 180	spinlock_t				lock;
 181
 182	struct clk				*clk;
 183	void __iomem				*base;
 184	phys_addr_t				phys;
 185	unsigned int				irq;
 186
 187	u32					cur_speed;
 188	unsigned int				cur_pos;
 189	unsigned int				words_per_32bit;
 190	unsigned int				bytes_per_word;
 191	unsigned int				curr_dma_words;
 192	unsigned int				cur_direction;
 193
 194	unsigned int				cur_rx_pos;
 195	unsigned int				cur_tx_pos;
 196
 197	unsigned int				dma_buf_size;
 198	unsigned int				max_buf_size;
 199	bool					is_curr_dma_xfer;
 200
 201	struct completion			rx_dma_complete;
 202	struct completion			tx_dma_complete;
 203
 204	u32					tx_status;
 205	u32					rx_status;
 206	u32					status_reg;
 207	bool					is_packed;
 208	bool					use_dma;
 209
 210	u32					command1_reg;
 211	u32					dma_control_reg;
 212	u32					def_command1_reg;
 213	u32					def_command2_reg;
 214	u32					spi_cs_timing1;
 215	u32					spi_cs_timing2;
 216	u8					dummy_cycles;
 217
 218	struct completion			xfer_completion;
 219	struct spi_transfer			*curr_xfer;
 220
 221	struct dma_chan				*rx_dma_chan;
 222	u32					*rx_dma_buf;
 223	dma_addr_t				rx_dma_phys;
 224	struct dma_async_tx_descriptor		*rx_dma_desc;
 225
 226	struct dma_chan				*tx_dma_chan;
 227	u32					*tx_dma_buf;
 228	dma_addr_t				tx_dma_phys;
 229	struct dma_async_tx_descriptor		*tx_dma_desc;
 230	const struct tegra_qspi_soc_data	*soc_data;
 231};
 232
 233static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
 234{
 235	return readl(tqspi->base + offset);
 236}
 237
 238static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
 239{
 240	writel(value, tqspi->base + offset);
 241
 242	/* read back register to make sure that register writes completed */
 243	if (offset != QSPI_TX_FIFO)
 244		readl(tqspi->base + QSPI_COMMAND1);
 245}
 246
 247static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
 248{
 249	u32 value;
 250
 251	/* write 1 to clear status register */
 252	value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
 253	tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);
 254
 255	value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
 256	if (!(value & QSPI_INTR_RDY_MASK)) {
 257		value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
 258		tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
 259	}
 260
 261	/* clear fifo status error if any */
 262	value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 263	if (value & QSPI_ERR)
 264		tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
 265}
 266
 267static unsigned int
 268tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
 269{
 270	unsigned int max_word, max_len, total_fifo_words;
 271	unsigned int remain_len = t->len - tqspi->cur_pos;
 272	unsigned int bits_per_word = t->bits_per_word;
 273
 274	tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
 275
 276	/*
 277	 * The Tegra QSPI controller supports packed and unpacked mode
 278	 * transfers. Packed mode is used for 8-, 16- or 32-bit words when
 279	 * the transfer is longer than three bytes; all other transfers use
 280	 * unpacked mode.
 281	 */
 282
 283	if ((bits_per_word == 8 || bits_per_word == 16 ||
 284	     bits_per_word == 32) && t->len > 3) {
 285		tqspi->is_packed = true;
 286		tqspi->words_per_32bit = 32 / bits_per_word;
 287	} else {
 288		tqspi->is_packed = false;
 289		tqspi->words_per_32bit = 1;
 290	}
 291
 292	if (tqspi->is_packed) {
 293		max_len = min(remain_len, tqspi->max_buf_size);
 294		tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
 295		total_fifo_words = (max_len + 3) / 4;
 296	} else {
 297		max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
 298		max_word = min(max_word, tqspi->max_buf_size / 4);
 299		tqspi->curr_dma_words = max_word;
 300		total_fifo_words = max_word;
 301	}
 302
 303	return total_fifo_words;
 304}
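/*
 * Editorial worked example (not part of the driver): for a transfer with
 * t->len = 10 bytes at bits_per_word = 8, the checks above select packed
 * mode (8/16/32 bits per word and len > 3), so bytes_per_word = 1,
 * words_per_32bit = 32 / 8 = 4, curr_dma_words = 10 and
 * total_fifo_words = (10 + 3) / 4 = 3. A 2-byte transfer at the same word
 * size stays in unpacked mode and occupies one FIFO word per data word.
 */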
 305
 306static unsigned int
 307tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
 308{
 309	unsigned int written_words, fifo_words_left, count;
 310	unsigned int len, tx_empty_count, max_n_32bit, i;
 311	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
 312	u32 fifo_status;
 313
 314	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 315	tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);
 316
 317	if (tqspi->is_packed) {
 318		fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
 319		written_words = min(fifo_words_left, tqspi->curr_dma_words);
 320		len = written_words * tqspi->bytes_per_word;
 321		max_n_32bit = DIV_ROUND_UP(len, 4);
 322		for (count = 0; count < max_n_32bit; count++) {
 323			u32 x = 0;
 324
 325			for (i = 0; (i < 4) && len; i++, len--)
 326				x |= (u32)(*tx_buf++) << (i * 8);
 327			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
 328		}
 329
 330		tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
 331	} else {
 332		unsigned int write_bytes;
 333		u8 bytes_per_word = tqspi->bytes_per_word;
 334
 335		max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
 336		written_words = max_n_32bit;
 337		len = written_words * tqspi->bytes_per_word;
 338		if (len > t->len - tqspi->cur_pos)
 339			len = t->len - tqspi->cur_pos;
 340		write_bytes = len;
 341		for (count = 0; count < max_n_32bit; count++) {
 342			u32 x = 0;
 343
 344			for (i = 0; len && (i < bytes_per_word); i++, len--)
 345				x |= (u32)(*tx_buf++) << (i * 8);
 346			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
 347		}
 348
 349		tqspi->cur_tx_pos += write_bytes;
 350	}
 351
 352	return written_words;
 353}
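/*
 * Editorial worked example (not part of the driver): in packed mode the
 * inner loop above assembles each FIFO word little-endian, with byte 0 in
 * bits 7:0. A tx_buf of { 0x11, 0x22, 0x33, 0x44 } is written to
 * QSPI_TX_FIFO as 0x44332211; a trailing partial group such as
 * { 0x55, 0x66 } is zero-padded in the upper bytes and written as
 * 0x00006655.
 */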
 354
 355static unsigned int
 356tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
 357{
 358	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
 359	unsigned int len, rx_full_count, count, i;
 360	unsigned int read_words = 0;
 361	u32 fifo_status, x;
 362
 363	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 364	rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
 365	if (tqspi->is_packed) {
 366		len = tqspi->curr_dma_words * tqspi->bytes_per_word;
 367		for (count = 0; count < rx_full_count; count++) {
 368			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);
 369
 370			for (i = 0; len && (i < 4); i++, len--)
 371				*rx_buf++ = (x >> i * 8) & 0xff;
 372		}
 373
 374		read_words += tqspi->curr_dma_words;
 375		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
 376	} else {
 377		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
 378		u8 bytes_per_word = tqspi->bytes_per_word;
 379		unsigned int read_bytes;
 380
 381		len = rx_full_count * bytes_per_word;
 382		if (len > t->len - tqspi->cur_pos)
 383			len = t->len - tqspi->cur_pos;
 384		read_bytes = len;
 385		for (count = 0; count < rx_full_count; count++) {
 386			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;
 387
 388			for (i = 0; len && (i < bytes_per_word); i++, len--)
 389				*rx_buf++ = (x >> (i * 8)) & 0xff;
 390		}
 391
 392		read_words += rx_full_count;
 393		tqspi->cur_rx_pos += read_bytes;
 394	}
 395
 396	return read_words;
 397}
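/*
 * Editorial worked example (not part of the driver): unpacked mode is only
 * taken for short transfers (len <= 3 bytes), since this controller accepts
 * 8, 16 or 32 bits per word. For a 2-byte read at bits_per_word = 8,
 * rx_mask = (1 << 8) - 1 = 0xff, each word read from QSPI_RX_FIFO carries
 * one data byte in bits 7:0, and the upper bits are masked off before the
 * byte lands in the client rx_buf.
 */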
 398
 399static void
 400tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
 401{
 402	dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
 403				tqspi->dma_buf_size, DMA_TO_DEVICE);
 404
 405	/*
 406	 * In packed mode, each FIFO word may contain multiple packets,
 407	 * depending on the bits per word, so all bytes in each FIFO word are valid.
 408	 *
 409	 * In unpacked mode, each FIFO word contains a single packet and,
 410	 * depending on the bits per word, any remaining bits in the FIFO word
 411	 * are ignored by the hardware as invalid.
 412	 */
 413	if (tqspi->is_packed) {
 414		tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
 415	} else {
 416		u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
 417		unsigned int i, count, consume, write_bytes;
 418
 419		/*
 420		 * Fill tx_dma_buf with a single packet per word, based on the
 421		 * bits per word, from the SPI core tx_buf.
 422		 */
 423		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
 424		if (consume > t->len - tqspi->cur_pos)
 425			consume = t->len - tqspi->cur_pos;
 426		write_bytes = consume;
 427		for (count = 0; count < tqspi->curr_dma_words; count++) {
 428			u32 x = 0;
 429
 430			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
 431				x |= (u32)(*tx_buf++) << (i * 8);
 432			tqspi->tx_dma_buf[count] = x;
 433		}
 434
 435		tqspi->cur_tx_pos += write_bytes;
 436	}
 437
 438	dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
 439				   tqspi->dma_buf_size, DMA_TO_DEVICE);
 440}
 441
 442static void
 443tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
 444{
 445	dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
 446				tqspi->dma_buf_size, DMA_FROM_DEVICE);
 447
 448	if (tqspi->is_packed) {
 449		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
 450	} else {
 451		unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
 452		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
 453		unsigned int i, count, consume, read_bytes;
 454
 455		/*
 456		 * Each FIFO word contains a single data packet.
 457		 * Skip the invalid bits in each FIFO word based on bits per
 458		 * word and align the bytes while filling the SPI core rx_buf.
 459		 */
 460		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
 461		if (consume > t->len - tqspi->cur_pos)
 462			consume = t->len - tqspi->cur_pos;
 463		read_bytes = consume;
 464		for (count = 0; count < tqspi->curr_dma_words; count++) {
 465			u32 x = tqspi->rx_dma_buf[count] & rx_mask;
 466
 467			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
 468				*rx_buf++ = (x >> (i * 8)) & 0xff;
 469		}
 470
 471		tqspi->cur_rx_pos += read_bytes;
 472	}
 473
 474	dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
 475				   tqspi->dma_buf_size, DMA_FROM_DEVICE);
 476}
 477
 478static void tegra_qspi_dma_complete(void *args)
 479{
 480	struct completion *dma_complete = args;
 481
 482	complete(dma_complete);
 483}
 484
 485static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
 486{
 487	dma_addr_t tx_dma_phys;
 488
 489	reinit_completion(&tqspi->tx_dma_complete);
 490
 491	if (tqspi->is_packed)
 492		tx_dma_phys = t->tx_dma;
 493	else
 494		tx_dma_phys = tqspi->tx_dma_phys;
 495
 496	tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
 497							 len, DMA_MEM_TO_DEV,
 498							 DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
 499
 500	if (!tqspi->tx_dma_desc) {
 501		dev_err(tqspi->dev, "Unable to get TX descriptor\n");
 502		return -EIO;
 503	}
 504
 505	tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
 506	tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
 507	dmaengine_submit(tqspi->tx_dma_desc);
 508	dma_async_issue_pending(tqspi->tx_dma_chan);
 509
 510	return 0;
 511}
 512
 513static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
 514{
 515	dma_addr_t rx_dma_phys;
 516
 517	reinit_completion(&tqspi->rx_dma_complete);
 518
 519	if (tqspi->is_packed)
 520		rx_dma_phys = t->rx_dma;
 521	else
 522		rx_dma_phys = tqspi->rx_dma_phys;
 523
 524	tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
 525							 len, DMA_DEV_TO_MEM,
 526							 DMA_PREP_INTERRUPT |  DMA_CTRL_ACK);
 527
 528	if (!tqspi->rx_dma_desc) {
 529		dev_err(tqspi->dev, "Unable to get RX descriptor\n");
 530		return -EIO;
 531	}
 532
 533	tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
 534	tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
 535	dmaengine_submit(tqspi->rx_dma_desc);
 536	dma_async_issue_pending(tqspi->rx_dma_chan);
 537
 538	return 0;
 539}
 540
 541static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
 542{
 543	void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
 544	u32 val;
 545
 546	val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 547	if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
 548		return 0;
 549
 550	val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
 551	tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);
 552
 553	if (!atomic)
 554		return readl_relaxed_poll_timeout(addr, val,
 555						  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
 556						  1000, 1000000);
 557
 558	return readl_relaxed_poll_timeout_atomic(addr, val,
 559						 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
 560						 1000, 1000000);
 561}
 562
 563static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
 564{
 565	u32 intr_mask;
 566
 567	intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
 568	intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
 569	tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
 570}
 571
 572static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
 573{
 574	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
 575	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
 576	unsigned int len;
 577
 578	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
 579
 580	if (t->tx_buf) {
 581		t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
 582		if (dma_mapping_error(tqspi->dev, t->tx_dma))
 583			return -ENOMEM;
 584	}
 585
 586	if (t->rx_buf) {
 587		t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
 588		if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
 589			dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
 590			return -ENOMEM;
 591		}
 592	}
 593
 594	return 0;
 595}
 596
 597static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
 598{
 599	unsigned int len;
 600
 601	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
 602
 603	dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
 604	dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
 605}
 606
 607static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
 608{
 609	struct dma_slave_config dma_sconfig = { 0 };
 610	unsigned int len;
 611	u8 dma_burst;
 612	int ret = 0;
 613	u32 val;
 614
 615	if (tqspi->is_packed) {
 616		ret = tegra_qspi_dma_map_xfer(tqspi, t);
 617		if (ret < 0)
 618			return ret;
 619	}
 620
 621	val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
 622	tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
 623
 624	tegra_qspi_unmask_irq(tqspi);
 625
 626	if (tqspi->is_packed)
 627		len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
 628	else
 629		len = tqspi->curr_dma_words * 4;
 630
 631	/* set attention level based on length of transfer */
 632	val = 0;
 633	if (len & 0xf) {
 634		val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
 635		dma_burst = 1;
 636	} else if (((len) >> 4) & 0x1) {
 637		val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
 638		dma_burst = 4;
 639	} else {
 640		val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
 641		dma_burst = 8;
 642	}
 643
 644	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
 645	tqspi->dma_control_reg = val;
 646
 647	dma_sconfig.device_fc = true;
 648	if (tqspi->cur_direction & DATA_DIR_TX) {
 649		dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
 650		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 651		dma_sconfig.dst_maxburst = dma_burst;
 652		ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
 653		if (ret < 0) {
 654			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
 655			return ret;
 656		}
 657
 658		tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
 659		ret = tegra_qspi_start_tx_dma(tqspi, t, len);
 660		if (ret < 0) {
 661			dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
 662			return ret;
 663		}
 664	}
 665
 666	if (tqspi->cur_direction & DATA_DIR_RX) {
 667		dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
 668		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 669		dma_sconfig.src_maxburst = dma_burst;
 670		ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
 671		if (ret < 0) {
 672			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
 673			return ret;
 674		}
 675
 676		dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
 677					   tqspi->dma_buf_size,
 678					   DMA_FROM_DEVICE);
 679
 680		ret = tegra_qspi_start_rx_dma(tqspi, t, len);
 681		if (ret < 0) {
 682			dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
 683			if (tqspi->cur_direction & DATA_DIR_TX)
 684				dmaengine_terminate_all(tqspi->tx_dma_chan);
 685			return ret;
 686		}
 687	}
 688
 689	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
 690
 691	tqspi->is_curr_dma_xfer = true;
 692	tqspi->dma_control_reg = val;
 693	val |= QSPI_DMA_EN;
 694	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
 695
 696	return ret;
 697}
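/*
 * Editorial worked example (not part of the driver): the attention level and
 * DMA burst above follow the DMA length. A 260-byte chunk (260 & 0xf != 0)
 * uses QSPI_TX_TRIG_1/QSPI_RX_TRIG_1 with a one-word burst, a 272-byte chunk
 * ((272 >> 4) & 0x1 == 1) uses the 4-word trigger and burst, and a 320-byte
 * chunk falls through to the 8-word trigger and burst.
 */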
 698
 699static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
 700{
 701	u32 val;
 702	unsigned int cur_words;
 703
 704	if (qspi->cur_direction & DATA_DIR_TX)
 705		cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
 706	else
 707		cur_words = qspi->curr_dma_words;
 708
 709	val = QSPI_DMA_BLK_SET(cur_words - 1);
 710	tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);
 711
 712	tegra_qspi_unmask_irq(qspi);
 713
 714	qspi->is_curr_dma_xfer = false;
 715	val = qspi->command1_reg;
 716	val |= QSPI_PIO;
 717	tegra_qspi_writel(qspi, val, QSPI_COMMAND1);
 718
 719	return 0;
 720}
 721
 722static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
 723{
 724	if (!tqspi->soc_data->has_dma)
 725		return;
 726
 727	if (tqspi->tx_dma_buf) {
 728		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
 729				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
 730		tqspi->tx_dma_buf = NULL;
 731	}
 732
 733	if (tqspi->tx_dma_chan) {
 734		dma_release_channel(tqspi->tx_dma_chan);
 735		tqspi->tx_dma_chan = NULL;
 736	}
 737
 738	if (tqspi->rx_dma_buf) {
 739		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
 740				  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
 741		tqspi->rx_dma_buf = NULL;
 742	}
 743
 744	if (tqspi->rx_dma_chan) {
 745		dma_release_channel(tqspi->rx_dma_chan);
 746		tqspi->rx_dma_chan = NULL;
 747	}
 748}
 749
 750static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
 751{
 752	struct dma_chan *dma_chan;
 753	dma_addr_t dma_phys;
 754	u32 *dma_buf;
 755	int err;
 756
 757	if (!tqspi->soc_data->has_dma)
 758		return 0;
 759
 760	dma_chan = dma_request_chan(tqspi->dev, "rx");
 761	if (IS_ERR(dma_chan)) {
 762		err = PTR_ERR(dma_chan);
 763		goto err_out;
 764	}
 765
 766	tqspi->rx_dma_chan = dma_chan;
 767
 768	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 769	if (!dma_buf) {
 770		err = -ENOMEM;
 771		goto err_out;
 772	}
 773
 774	tqspi->rx_dma_buf = dma_buf;
 775	tqspi->rx_dma_phys = dma_phys;
 776
 777	dma_chan = dma_request_chan(tqspi->dev, "tx");
 778	if (IS_ERR(dma_chan)) {
 779		err = PTR_ERR(dma_chan);
 780		goto err_out;
 781	}
 782
 783	tqspi->tx_dma_chan = dma_chan;
 784
 785	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 786	if (!dma_buf) {
 787		err = -ENOMEM;
 788		goto err_out;
 789	}
 790
 791	tqspi->tx_dma_buf = dma_buf;
 792	tqspi->tx_dma_phys = dma_phys;
 793	tqspi->use_dma = true;
 794
 795	return 0;
 796
 797err_out:
 798	tegra_qspi_deinit_dma(tqspi);
 799
 800	if (err != -EPROBE_DEFER) {
 801		dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
 802		dev_err(tqspi->dev, "falling back to PIO\n");
 803		return 0;
 804	}
 805
 806	return err;
 807}
 808
 809static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
 810					 bool is_first_of_msg)
 811{
 812	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
 813	struct tegra_qspi_client_data *cdata = spi->controller_data;
 814	u32 command1, command2, speed = t->speed_hz;
 815	u8 bits_per_word = t->bits_per_word;
 816	u32 tx_tap = 0, rx_tap = 0;
 817	int req_mode;
 818
 819	if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
 820		clk_set_rate(tqspi->clk, speed);
 821		tqspi->cur_speed = speed;
 822	}
 823
 824	tqspi->cur_pos = 0;
 825	tqspi->cur_rx_pos = 0;
 826	tqspi->cur_tx_pos = 0;
 827	tqspi->curr_xfer = t;
 828
 829	if (is_first_of_msg) {
 830		tegra_qspi_mask_clear_irq(tqspi);
 831
 832		command1 = tqspi->def_command1_reg;
 833		command1 |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
 834		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
 835
 836		command1 &= ~QSPI_CONTROL_MODE_MASK;
 837		req_mode = spi->mode & 0x3;
 838		if (req_mode == SPI_MODE_3)
 839			command1 |= QSPI_CONTROL_MODE_3;
 840		else
 841			command1 |= QSPI_CONTROL_MODE_0;
 842
 843		if (spi->mode & SPI_CS_HIGH)
 844			command1 |= QSPI_CS_SW_VAL;
 845		else
 846			command1 &= ~QSPI_CS_SW_VAL;
 847		tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
 848
 849		if (cdata && cdata->tx_clk_tap_delay)
 850			tx_tap = cdata->tx_clk_tap_delay;
 851
 852		if (cdata && cdata->rx_clk_tap_delay)
 853			rx_tap = cdata->rx_clk_tap_delay;
 854
 855		command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
 856		if (command2 != tqspi->def_command2_reg)
 857			tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);
 858
 859	} else {
 860		command1 = tqspi->command1_reg;
 861		command1 &= ~QSPI_BIT_LENGTH(~0);
 862		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
 863	}
 864
 865	command1 &= ~QSPI_SDR_DDR_SEL;
 866
 867	return command1;
 868}
 869
 870static int tegra_qspi_start_transfer_one(struct spi_device *spi,
 871					 struct spi_transfer *t, u32 command1)
 872{
 873	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
 874	unsigned int total_fifo_words;
 875	u8 bus_width = 0;
 876	int ret;
 877
 878	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
 879
 880	command1 &= ~QSPI_PACKED;
 881	if (tqspi->is_packed)
 882		command1 |= QSPI_PACKED;
 883	tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
 884
 885	tqspi->cur_direction = 0;
 886
 887	command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
 888	if (t->rx_buf) {
 889		command1 |= QSPI_RX_EN;
 890		tqspi->cur_direction |= DATA_DIR_RX;
 891		bus_width = t->rx_nbits;
 892	}
 893
 894	if (t->tx_buf) {
 895		command1 |= QSPI_TX_EN;
 896		tqspi->cur_direction |= DATA_DIR_TX;
 897		bus_width = t->tx_nbits;
 898	}
 899
 900	command1 &= ~QSPI_INTERFACE_WIDTH_MASK;
 901
 902	if (bus_width == SPI_NBITS_QUAD)
 903		command1 |= QSPI_INTERFACE_WIDTH_QUAD;
 904	else if (bus_width == SPI_NBITS_DUAL)
 905		command1 |= QSPI_INTERFACE_WIDTH_DUAL;
 906	else
 907		command1 |= QSPI_INTERFACE_WIDTH_SINGLE;
 908
 909	tqspi->command1_reg = command1;
 910
 911	tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);
 912
 913	ret = tegra_qspi_flush_fifos(tqspi, false);
 914	if (ret < 0)
 915		return ret;
 916
 917	if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
 918		ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
 919	else
 920		ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);
 921
 922	return ret;
 923}
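/*
 * Editorial worked example (not part of the driver): with DMA available, a
 * 300-byte packed transfer yields total_fifo_words = (300 + 3) / 4 = 75,
 * which exceeds QSPI_FIFO_DEPTH (64), so the DMA path is taken; a 200-byte
 * transfer (50 FIFO words) is handled by the PIO path instead.
 */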
 924
 925static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
 926{
 927	struct tegra_qspi_client_data *cdata;
 928	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
 929
 930	cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
 931	if (!cdata)
 932		return NULL;
 933
 934	device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
 935				 &cdata->tx_clk_tap_delay);
 936	device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
 937				 &cdata->rx_clk_tap_delay);
 938
 939	return cdata;
 940}
 941
 942static int tegra_qspi_setup(struct spi_device *spi)
 943{
 944	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
 945	struct tegra_qspi_client_data *cdata = spi->controller_data;
 946	unsigned long flags;
 947	u32 val;
 948	int ret;
 949
 950	ret = pm_runtime_resume_and_get(tqspi->dev);
 951	if (ret < 0) {
 952		dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
 953		return ret;
 954	}
 955
 956	if (!cdata) {
 957		cdata = tegra_qspi_parse_cdata_dt(spi);
 958		spi->controller_data = cdata;
 959	}
 960	spin_lock_irqsave(&tqspi->lock, flags);
 961
 962	/* keep the default CS state inactive */
 963	val = tqspi->def_command1_reg;
 964	val |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
 965	if (spi->mode & SPI_CS_HIGH)
 966		val &= ~QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
 967	else
 968		val |= QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
 969
 970	tqspi->def_command1_reg = val;
 971	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
 972
 973	spin_unlock_irqrestore(&tqspi->lock, flags);
 974
 975	pm_runtime_put(tqspi->dev);
 976
 977	return 0;
 978}
 979
 980static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
 981{
 982	dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
 983	dev_dbg(tqspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
 984		tegra_qspi_readl(tqspi, QSPI_COMMAND1),
 985		tegra_qspi_readl(tqspi, QSPI_COMMAND2));
 986	dev_dbg(tqspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
 987		tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
 988		tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
 989	dev_dbg(tqspi->dev, "INTR_MASK:  0x%08x | MISC: 0x%08x\n",
 990		tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
 991		tegra_qspi_readl(tqspi, QSPI_MISC_REG));
 992	dev_dbg(tqspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
 993		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
 994		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
 995}
 996
 997static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
 998{
 999	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
1000	tegra_qspi_dump_regs(tqspi);
1001	tegra_qspi_flush_fifos(tqspi, true);
1002	if (device_reset(tqspi->dev) < 0)
1003		dev_warn_once(tqspi->dev, "device reset failed\n");
1004}
1005
1006static void tegra_qspi_transfer_end(struct spi_device *spi)
1007{
1008	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
1009	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
1010
1011	if (cs_val)
1012		tqspi->command1_reg |= QSPI_CS_SW_VAL;
1013	else
1014		tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
1015	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
1016	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
1017}
1018
1019static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
1020{
1021	u32 cmd_config = 0;
1022
1023	/* Build the command phase configuration */
1024	if (is_ddr)
1025		cmd_config |= QSPI_COMMAND_SDR_DDR;
1026	else
1027		cmd_config &= ~QSPI_COMMAND_SDR_DDR;
1028
1029	cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
1030	cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
1031
1032	return cmd_config;
1033}
1034
1035static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
1036{
1037	u32 addr_config = 0;
1038
1039	/* Build the address phase configuration */
1040	is_ddr = 0; /* only SDR mode is supported */
1041	bus_width = 0; /* X1 mode */
1042
1043	if (is_ddr)
1044		addr_config |= QSPI_ADDRESS_SDR_DDR;
1045	else
1046		addr_config &= ~QSPI_ADDRESS_SDR_DDR;
1047
1048	addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
1049	addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
1050
1051	return addr_config;
1052}
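/*
 * Editorial worked example (not part of the driver): for a conventional
 * flash command with a one-byte opcode and a three-byte address in X1 SDR
 * mode, the combined-sequence path below programs
 * tegra_qspi_cmd_config(false, 0, 1), i.e. QSPI_COMMAND_SIZE_SET(7), into
 * QSPI_CMB_SEQ_CMD_CFG and tegra_qspi_addr_config(false, 0, 3), i.e.
 * QSPI_ADDRESS_SIZE_SET(23), into QSPI_CMB_SEQ_ADDR_CFG.
 */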
1053
1054static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
1055					struct spi_message *msg)
1056{
1057	bool is_first_msg = true;
1058	struct spi_transfer *xfer;
1059	struct spi_device *spi = msg->spi;
1060	u8 transfer_phase = 0;
1061	u32 cmd1 = 0, dma_ctl = 0;
1062	int ret = 0;
1063	u32 address_value = 0;
1064	u32 cmd_config = 0, addr_config = 0;
1065	u8 cmd_value = 0, val = 0;
1066
1067	/* Enable Combined sequence mode */
1068	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
1069	if (spi->mode & SPI_TPM_HW_FLOW) {
1070		if (tqspi->soc_data->supports_tpm)
1071			val |= QSPI_TPM_WAIT_POLL_EN;
1072		else
1073			return -EIO;
1074	}
1075	val |= QSPI_CMB_SEQ_EN;
1076	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
1077	/* Process individual transfer list */
1078	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1079		switch (transfer_phase) {
1080		case CMD_TRANSFER:
1081			/* X1 SDR mode */
1082			cmd_config = tegra_qspi_cmd_config(false, 0,
1083							   xfer->len);
1084			cmd_value = *((const u8 *)(xfer->tx_buf));
1085			break;
1086		case ADDR_TRANSFER:
1087			/* X1 SDR mode */
1088			addr_config = tegra_qspi_addr_config(false, 0,
1089							     xfer->len);
1090			address_value = *((const u32 *)(xfer->tx_buf));
1091			break;
1092		case DATA_TRANSFER:
1093			/* Program Command, Address value in register */
1094			tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
1095			tegra_qspi_writel(tqspi, address_value,
1096					  QSPI_CMB_SEQ_ADDR);
1097			/* Program Command and Address config in register */
1098			tegra_qspi_writel(tqspi, cmd_config,
1099					  QSPI_CMB_SEQ_CMD_CFG);
1100			tegra_qspi_writel(tqspi, addr_config,
1101					  QSPI_CMB_SEQ_ADDR_CFG);
1102
1103			reinit_completion(&tqspi->xfer_completion);
1104			cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
1105							     is_first_msg);
1106			ret = tegra_qspi_start_transfer_one(spi, xfer,
1107							    cmd1);
1108
1109			if (ret < 0) {
1110				dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
1111					ret);
1112				return ret;
1113			}
1114
1115			is_first_msg = false;
1116			ret = wait_for_completion_timeout
1117					(&tqspi->xfer_completion,
1118					QSPI_DMA_TIMEOUT);
1119
1120			if (WARN_ON(ret == 0)) {
1121				dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
1122					ret);
1123				if (tqspi->is_curr_dma_xfer &&
1124				    (tqspi->cur_direction & DATA_DIR_TX))
1125					dmaengine_terminate_all
1126						(tqspi->tx_dma_chan);
1127
1128				if (tqspi->is_curr_dma_xfer &&
1129				    (tqspi->cur_direction & DATA_DIR_RX))
1130					dmaengine_terminate_all
1131						(tqspi->rx_dma_chan);
1132
1133				/* Abort transfer by resetting pio/dma bit */
1134				if (!tqspi->is_curr_dma_xfer) {
1135					cmd1 = tegra_qspi_readl
1136							(tqspi,
1137							 QSPI_COMMAND1);
1138					cmd1 &= ~QSPI_PIO;
1139					tegra_qspi_writel
1140							(tqspi, cmd1,
1141							 QSPI_COMMAND1);
1142				} else {
1143					dma_ctl = tegra_qspi_readl
1144							(tqspi,
1145							 QSPI_DMA_CTL);
1146					dma_ctl &= ~QSPI_DMA_EN;
1147					tegra_qspi_writel(tqspi, dma_ctl,
1148							  QSPI_DMA_CTL);
1149				}
1150
1151				/* Reset controller if timeout happens */
1152				if (device_reset(tqspi->dev) < 0)
1153					dev_warn_once(tqspi->dev,
1154						      "device reset failed\n");
1155				ret = -EIO;
1156				goto exit;
1157			}
1158
1159			if (tqspi->tx_status ||  tqspi->rx_status) {
1160				dev_err(tqspi->dev, "QSPI Transfer failed\n");
1161				tqspi->tx_status = 0;
1162				tqspi->rx_status = 0;
1163				ret = -EIO;
1164				goto exit;
1165			}
1166			if (!xfer->cs_change) {
1167				tegra_qspi_transfer_end(spi);
1168				spi_transfer_delay_exec(xfer);
1169			}
1170			break;
1171		default:
1172			ret = -EINVAL;
1173			goto exit;
1174		}
1175		msg->actual_length += xfer->len;
1176		transfer_phase++;
1177	}
1178	ret = 0;
1179
1180exit:
1181	msg->status = ret;
1182	if (ret < 0) {
1183		tegra_qspi_transfer_end(spi);
1184		spi_transfer_delay_exec(xfer);
1185	}
1186
1187	return ret;
1188}
1189
1190static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
1191					    struct spi_message *msg)
1192{
1193	struct spi_device *spi = msg->spi;
1194	struct spi_transfer *transfer;
1195	bool is_first_msg = true;
1196	int ret = 0, val = 0;
1197
1198	msg->status = 0;
1199	msg->actual_length = 0;
1200	tqspi->tx_status = 0;
1201	tqspi->rx_status = 0;
1202
1203	/* Disable Combined sequence mode */
1204	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
1205	val &= ~QSPI_CMB_SEQ_EN;
1206	if (tqspi->soc_data->supports_tpm)
1207		val &= ~QSPI_TPM_WAIT_POLL_EN;
1208	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
1209	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
1210		struct spi_transfer *xfer = transfer;
1211		u8 dummy_bytes = 0;
1212		u32 cmd1;
1213
1214		tqspi->dummy_cycles = 0;
1215		/*
1216		 * The Tegra QSPI hardware can clock out dummy bytes after the actual
1217		 * transfer, based on the dummy clock cycles programmed in QSPI_MISC.
1218		 * So check whether the next transfer carries dummy data and, if so,
1219		 * program its dummy cycles along with the current transfer and skip it.
1220		 */
1221		if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
1222			struct spi_transfer *next_xfer;
1223
1224			next_xfer = list_next_entry(xfer, transfer_list);
1225			if (next_xfer->dummy_data) {
1226				u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;
1227
1228				if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
1229					tqspi->dummy_cycles = dummy_cycles;
1230					dummy_bytes = next_xfer->len;
1231					transfer = next_xfer;
1232				}
1233			}
1234		}
1235
1236		reinit_completion(&tqspi->xfer_completion);
1237
1238		cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
1239
1240		ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
1241		if (ret < 0) {
1242			dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
1243			goto complete_xfer;
1244		}
1245
1246		ret = wait_for_completion_timeout(&tqspi->xfer_completion,
1247						  QSPI_DMA_TIMEOUT);
1248		if (WARN_ON(ret == 0)) {
1249			dev_err(tqspi->dev, "transfer timeout\n");
1250			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
1251				dmaengine_terminate_all(tqspi->tx_dma_chan);
1252			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
1253				dmaengine_terminate_all(tqspi->rx_dma_chan);
1254			tegra_qspi_handle_error(tqspi);
1255			ret = -EIO;
1256			goto complete_xfer;
1257		}
1258
1259		if (tqspi->tx_status ||  tqspi->rx_status) {
1260			tegra_qspi_handle_error(tqspi);
1261			ret = -EIO;
1262			goto complete_xfer;
1263		}
1264
1265		msg->actual_length += xfer->len + dummy_bytes;
1266
1267complete_xfer:
1268		if (ret < 0) {
1269			tegra_qspi_transfer_end(spi);
1270			spi_transfer_delay_exec(xfer);
1271			goto exit;
1272		}
1273
1274		if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
1275			/* de-activate CS after last transfer only when cs_change is not set */
1276			if (!xfer->cs_change) {
1277				tegra_qspi_transfer_end(spi);
1278				spi_transfer_delay_exec(xfer);
1279			}
1280		} else if (xfer->cs_change) {
1281			/* de-activate CS between transfers only when cs_change is set */
1282			tegra_qspi_transfer_end(spi);
1283			spi_transfer_delay_exec(xfer);
1284		}
1285	}
1286
1287	ret = 0;
1288exit:
1289	msg->status = ret;
1290
1291	return ret;
1292}
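/*
 * Editorial worked example (not part of the driver): a message of the form
 * <1-byte opcode> <dummy transfer> <data> is handled by the loop above in
 * two passes. If the dummy transfer has len = 1 and tx_nbits = 1, then
 * dummy_cycles = 1 * 8 / 1 = 8 <= QSPI_DUMMY_CYCLES_MAX, so eight dummy
 * clocks are programmed through QSPI_MISC_REG together with the opcode
 * transfer and the dummy entry itself is skipped.
 */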
1293
1294static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
1295					struct spi_message *msg)
1296{
1297	int transfer_count = 0;
1298	struct spi_transfer *xfer;
1299
1300	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1301		transfer_count++;
1302	}
1303	if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
1304		return false;
1305	xfer = list_first_entry(&msg->transfers, typeof(*xfer),
1306				transfer_list);
1307	if (xfer->len > 2)
1308		return false;
1309	xfer = list_next_entry(xfer, transfer_list);
1310	if (xfer->len > 4 || xfer->len < 3)
1311		return false;
1312	xfer = list_next_entry(xfer, transfer_list);
1313	if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
1314		return false;
1315
1316	return true;
1317}
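/*
 * Editorial sketch (not part of the driver, never compiled): a minimal
 * client message that satisfies the checks above - exactly three transfers,
 * a command of at most two bytes, an address of three or four bytes, then
 * the data phase. The opcode and address values are hypothetical.
 */
#if 0
static int example_cmb_seq_read(struct spi_device *spi, void *buf, size_t len)
{
	u8 cmd = 0x03;		/* hypothetical read opcode */
	u32 addr = 0x1000;	/* hypothetical flash address */
	struct spi_transfer xfers[3] = {
		{ .tx_buf = &cmd,  .len = 1 },		/* CMD_TRANSFER  */
		{ .tx_buf = &addr, .len = 3 },		/* ADDR_TRANSFER */
		{ .rx_buf = buf,   .len = len },	/* DATA_TRANSFER */
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	spi_message_add_tail(&xfers[2], &msg);

	return spi_sync(spi, &msg);
}
#endif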
1318
1319static int tegra_qspi_transfer_one_message(struct spi_controller *host,
1320					   struct spi_message *msg)
1321{
1322	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1323	int ret;
1324
1325	if (tegra_qspi_validate_cmb_seq(tqspi, msg))
1326		ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
1327	else
1328		ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
1329
1330	spi_finalize_current_message(host);
1331
1332	return ret;
1333}
1334
1335static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
1336{
1337	struct spi_transfer *t = tqspi->curr_xfer;
1338	unsigned long flags;
1339
1340	spin_lock_irqsave(&tqspi->lock, flags);
1341
1342	if (tqspi->tx_status ||  tqspi->rx_status) {
1343		tegra_qspi_handle_error(tqspi);
1344		complete(&tqspi->xfer_completion);
1345		goto exit;
1346	}
1347
1348	if (tqspi->cur_direction & DATA_DIR_RX)
1349		tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
1350
1351	if (tqspi->cur_direction & DATA_DIR_TX)
1352		tqspi->cur_pos = tqspi->cur_tx_pos;
1353	else
1354		tqspi->cur_pos = tqspi->cur_rx_pos;
1355
1356	if (tqspi->cur_pos == t->len) {
1357		complete(&tqspi->xfer_completion);
1358		goto exit;
1359	}
1360
1361	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1362	tegra_qspi_start_cpu_based_transfer(tqspi, t);
1363exit:
1364	spin_unlock_irqrestore(&tqspi->lock, flags);
1365	return IRQ_HANDLED;
1366}
1367
1368static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
1369{
1370	struct spi_transfer *t = tqspi->curr_xfer;
1371	unsigned int total_fifo_words;
1372	unsigned long flags;
1373	long wait_status;
1374	int err = 0;
1375
1376	if (tqspi->cur_direction & DATA_DIR_TX) {
1377		if (tqspi->tx_status) {
1378			dmaengine_terminate_all(tqspi->tx_dma_chan);
1379			err += 1;
1380		} else {
1381			wait_status = wait_for_completion_interruptible_timeout(
1382				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
1383			if (wait_status <= 0) {
1384				dmaengine_terminate_all(tqspi->tx_dma_chan);
1385				dev_err(tqspi->dev, "failed TX DMA transfer\n");
1386				err += 1;
1387			}
1388		}
1389	}
1390
1391	if (tqspi->cur_direction & DATA_DIR_RX) {
1392		if (tqspi->rx_status) {
1393			dmaengine_terminate_all(tqspi->rx_dma_chan);
1394			err += 2;
1395		} else {
1396			wait_status = wait_for_completion_interruptible_timeout(
1397				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
1398			if (wait_status <= 0) {
1399				dmaengine_terminate_all(tqspi->rx_dma_chan);
1400				dev_err(tqspi->dev, "failed RX DMA transfer\n");
1401				err += 2;
1402			}
1403		}
1404	}
1405
1406	spin_lock_irqsave(&tqspi->lock, flags);
1407
1408	if (err) {
1409		tegra_qspi_dma_unmap_xfer(tqspi, t);
1410		tegra_qspi_handle_error(tqspi);
1411		complete(&tqspi->xfer_completion);
1412		goto exit;
1413	}
1414
1415	if (tqspi->cur_direction & DATA_DIR_RX)
1416		tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
1417
1418	if (tqspi->cur_direction & DATA_DIR_TX)
1419		tqspi->cur_pos = tqspi->cur_tx_pos;
1420	else
1421		tqspi->cur_pos = tqspi->cur_rx_pos;
1422
1423	if (tqspi->cur_pos == t->len) {
1424		tegra_qspi_dma_unmap_xfer(tqspi, t);
1425		complete(&tqspi->xfer_completion);
1426		goto exit;
1427	}
1428
1429	tegra_qspi_dma_unmap_xfer(tqspi, t);
1430
1431	/* continue transfer in current message */
1432	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1433	if (total_fifo_words > QSPI_FIFO_DEPTH)
1434		err = tegra_qspi_start_dma_based_transfer(tqspi, t);
1435	else
1436		err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
1437
1438exit:
1439	spin_unlock_irqrestore(&tqspi->lock, flags);
1440	return IRQ_HANDLED;
1441}
1442
1443static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
1444{
1445	struct tegra_qspi *tqspi = context_data;
1446
1447	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
1448
1449	if (tqspi->cur_direction & DATA_DIR_TX)
1450		tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
1451
1452	if (tqspi->cur_direction & DATA_DIR_RX)
1453		tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
1454
1455	tegra_qspi_mask_clear_irq(tqspi);
1456
1457	if (!tqspi->is_curr_dma_xfer)
1458		return handle_cpu_based_xfer(tqspi);
1459
1460	return handle_dma_based_xfer(tqspi);
1461}
1462
1463static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
1464	.has_dma = true,
1465	.cmb_xfer_capable = false,
1466	.supports_tpm = false,
1467	.cs_count = 1,
1468};
1469
1470static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
1471	.has_dma = true,
1472	.cmb_xfer_capable = true,
1473	.supports_tpm = false,
1474	.cs_count = 1,
1475};
1476
1477static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
1478	.has_dma = false,
1479	.cmb_xfer_capable = true,
1480	.supports_tpm = true,
1481	.cs_count = 1,
1482};
1483
1484static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
1485	.has_dma = false,
1486	.cmb_xfer_capable = true,
1487	.supports_tpm = true,
1488	.cs_count = 4,
1489};
1490
1491static const struct of_device_id tegra_qspi_of_match[] = {
1492	{
1493		.compatible = "nvidia,tegra210-qspi",
1494		.data	    = &tegra210_qspi_soc_data,
1495	}, {
1496		.compatible = "nvidia,tegra186-qspi",
1497		.data	    = &tegra186_qspi_soc_data,
1498	}, {
1499		.compatible = "nvidia,tegra194-qspi",
1500		.data	    = &tegra186_qspi_soc_data,
1501	}, {
1502		.compatible = "nvidia,tegra234-qspi",
1503		.data	    = &tegra234_qspi_soc_data,
1504	}, {
1505		.compatible = "nvidia,tegra241-qspi",
1506		.data	    = &tegra241_qspi_soc_data,
1507	},
1508	{}
1509};
1510
1511MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
1512
1513#ifdef CONFIG_ACPI
1514static const struct acpi_device_id tegra_qspi_acpi_match[] = {
1515	{
1516		.id = "NVDA1213",
1517		.driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
1518	}, {
1519		.id = "NVDA1313",
1520		.driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
1521	}, {
1522		.id = "NVDA1413",
1523		.driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
1524	}, {
1525		.id = "NVDA1513",
1526		.driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
1527	},
1528	{}
1529};
1530
1531MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
1532#endif
1533
1534static int tegra_qspi_probe(struct platform_device *pdev)
1535{
1536	struct spi_controller	*host;
1537	struct tegra_qspi	*tqspi;
1538	struct resource		*r;
1539	int ret, qspi_irq;
1540	int bus_num;
1541
1542	host = devm_spi_alloc_host(&pdev->dev, sizeof(*tqspi));
1543	if (!host)
1544		return -ENOMEM;
1545
1546	platform_set_drvdata(pdev, host);
1547	tqspi = spi_controller_get_devdata(host);
1548
1549	host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
1550			  SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
1551	host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
1552	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
1553	host->setup = tegra_qspi_setup;
1554	host->transfer_one_message = tegra_qspi_transfer_one_message;
1555	host->num_chipselect = 1;
1556	host->auto_runtime_pm = true;
1557
1558	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
1559	if (bus_num >= 0)
1560		host->bus_num = bus_num;
1561
1562	tqspi->host = host;
1563	tqspi->dev = &pdev->dev;
1564	spin_lock_init(&tqspi->lock);
1565
1566	tqspi->soc_data = device_get_match_data(&pdev->dev);
1567	host->num_chipselect = tqspi->soc_data->cs_count;
1568	tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
1569	if (IS_ERR(tqspi->base))
1570		return PTR_ERR(tqspi->base);
1571
1572	tqspi->phys = r->start;
1573	qspi_irq = platform_get_irq(pdev, 0);
1574	if (qspi_irq < 0)
1575		return qspi_irq;
1576	tqspi->irq = qspi_irq;
1577
1578	if (!has_acpi_companion(tqspi->dev)) {
1579		tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
1580		if (IS_ERR(tqspi->clk)) {
1581			ret = PTR_ERR(tqspi->clk);
1582			dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
1583			return ret;
1584		}
1585
1586	}
1587
1588	tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
1589	tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
1590
1591	ret = tegra_qspi_init_dma(tqspi);
1592	if (ret < 0)
1593		return ret;
1594
1595	if (tqspi->use_dma)
1596		tqspi->max_buf_size = tqspi->dma_buf_size;
1597
1598	init_completion(&tqspi->tx_dma_complete);
1599	init_completion(&tqspi->rx_dma_complete);
1600	init_completion(&tqspi->xfer_completion);
1601
1602	pm_runtime_enable(&pdev->dev);
1603	ret = pm_runtime_resume_and_get(&pdev->dev);
1604	if (ret < 0) {
1605		dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
1606		goto exit_pm_disable;
1607	}
1608
1609	if (device_reset(tqspi->dev) < 0)
1610		dev_warn_once(tqspi->dev, "device reset failed\n");
1611
1612	tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW |  QSPI_CS_SW_VAL;
1613	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
1614	tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
1615	tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
1616	tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
1617
1618	pm_runtime_put(&pdev->dev);
1619
1620	ret = request_threaded_irq(tqspi->irq, NULL,
1621				   tegra_qspi_isr_thread, IRQF_ONESHOT,
1622				   dev_name(&pdev->dev), tqspi);
1623	if (ret < 0) {
1624		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
1625		goto exit_pm_disable;
1626	}
1627
1628	host->dev.of_node = pdev->dev.of_node;
1629	ret = spi_register_controller(host);
1630	if (ret < 0) {
1631		dev_err(&pdev->dev, "failed to register host: %d\n", ret);
1632		goto exit_free_irq;
1633	}
1634
1635	return 0;
1636
1637exit_free_irq:
1638	free_irq(qspi_irq, tqspi);
1639exit_pm_disable:
1640	pm_runtime_force_suspend(&pdev->dev);
1641	tegra_qspi_deinit_dma(tqspi);
1642	return ret;
1643}
1644
1645static void tegra_qspi_remove(struct platform_device *pdev)
1646{
1647	struct spi_controller *host = platform_get_drvdata(pdev);
1648	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1649
1650	spi_unregister_controller(host);
1651	free_irq(tqspi->irq, tqspi);
1652	pm_runtime_force_suspend(&pdev->dev);
1653	tegra_qspi_deinit_dma(tqspi);
1654}
1655
1656static int __maybe_unused tegra_qspi_suspend(struct device *dev)
1657{
1658	struct spi_controller *host = dev_get_drvdata(dev);
1659
1660	return spi_controller_suspend(host);
1661}
1662
1663static int __maybe_unused tegra_qspi_resume(struct device *dev)
1664{
1665	struct spi_controller *host = dev_get_drvdata(dev);
1666	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1667	int ret;
1668
1669	ret = pm_runtime_resume_and_get(dev);
1670	if (ret < 0) {
1671		dev_err(dev, "failed to get runtime PM: %d\n", ret);
1672		return ret;
1673	}
1674
1675	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
1676	tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
1677	pm_runtime_put(dev);
1678
1679	return spi_controller_resume(host);
1680}
1681
1682static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
1683{
1684	struct spi_controller *host = dev_get_drvdata(dev);
1685	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1686
1687	/* Runtime pm disabled with ACPI */
1688	if (has_acpi_companion(tqspi->dev))
1689		return 0;
1690	/* flush all writes which are in the PPSB queue by reading back */
1691	tegra_qspi_readl(tqspi, QSPI_COMMAND1);
1692
1693	clk_disable_unprepare(tqspi->clk);
1694
1695	return 0;
1696}
1697
1698static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
1699{
1700	struct spi_controller *host = dev_get_drvdata(dev);
1701	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1702	int ret;
1703
1704	/* Runtime pm disabled with ACPI */
1705	if (has_acpi_companion(tqspi->dev))
1706		return 0;
1707	ret = clk_prepare_enable(tqspi->clk);
1708	if (ret < 0)
1709		dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
1710
1711	return ret;
1712}
1713
1714static const struct dev_pm_ops tegra_qspi_pm_ops = {
1715	SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
1716	SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
1717};
1718
1719static struct platform_driver tegra_qspi_driver = {
1720	.driver = {
1721		.name		= "tegra-qspi",
1722		.pm		= &tegra_qspi_pm_ops,
1723		.of_match_table	= tegra_qspi_of_match,
1724		.acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
1725	},
1726	.probe =	tegra_qspi_probe,
1727	.remove_new =	tegra_qspi_remove,
1728};
1729module_platform_driver(tegra_qspi_driver);
1730
1731MODULE_ALIAS("platform:qspi-tegra");
1732MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
1733MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
1734MODULE_LICENSE("GPL v2");