// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 NVIDIA CORPORATION.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/property.h>

#define QSPI_COMMAND1				0x000
#define QSPI_BIT_LENGTH(x)			(((x) & 0x1f) << 0)
#define QSPI_PACKED				BIT(5)
#define QSPI_INTERFACE_WIDTH_MASK		(0x03 << 7)
#define QSPI_INTERFACE_WIDTH(x)			(((x) & 0x03) << 7)
#define QSPI_INTERFACE_WIDTH_SINGLE		QSPI_INTERFACE_WIDTH(0)
#define QSPI_INTERFACE_WIDTH_DUAL		QSPI_INTERFACE_WIDTH(1)
#define QSPI_INTERFACE_WIDTH_QUAD		QSPI_INTERFACE_WIDTH(2)
#define QSPI_SDR_DDR_SEL			BIT(9)
#define QSPI_TX_EN				BIT(11)
#define QSPI_RX_EN				BIT(12)
#define QSPI_CS_SW_VAL				BIT(20)
#define QSPI_CS_SW_HW				BIT(21)

#define QSPI_CS_POL_INACTIVE(n)			(1 << (22 + (n)))
#define QSPI_CS_POL_INACTIVE_MASK		(0xF << 22)
#define QSPI_CS_SEL_0				(0 << 26)
#define QSPI_CS_SEL_1				(1 << 26)
#define QSPI_CS_SEL_2				(2 << 26)
#define QSPI_CS_SEL_3				(3 << 26)
#define QSPI_CS_SEL_MASK			(3 << 26)
#define QSPI_CS_SEL(x)				(((x) & 0x3) << 26)

#define QSPI_CONTROL_MODE_0			(0 << 28)
#define QSPI_CONTROL_MODE_3			(3 << 28)
#define QSPI_CONTROL_MODE_MASK			(3 << 28)
#define QSPI_M_S				BIT(30)
#define QSPI_PIO				BIT(31)

#define QSPI_COMMAND2				0x004
#define QSPI_TX_TAP_DELAY(x)			(((x) & 0x3f) << 10)
#define QSPI_RX_TAP_DELAY(x)			(((x) & 0xff) << 0)

#define QSPI_CS_TIMING1				0x008
#define QSPI_SETUP_HOLD(setup, hold)		(((setup) << 4) | (hold))

#define QSPI_CS_TIMING2				0x00c
#define CYCLES_BETWEEN_PACKETS_0(x)		(((x) & 0x1f) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0		BIT(5)

#define QSPI_TRANS_STATUS			0x010
#define QSPI_BLK_CNT(val)			(((val) >> 0) & 0xffff)
#define QSPI_RDY				BIT(30)

#define QSPI_FIFO_STATUS			0x014
#define QSPI_RX_FIFO_EMPTY			BIT(0)
#define QSPI_RX_FIFO_FULL			BIT(1)
#define QSPI_TX_FIFO_EMPTY			BIT(2)
#define QSPI_TX_FIFO_FULL			BIT(3)
#define QSPI_RX_FIFO_UNF			BIT(4)
#define QSPI_RX_FIFO_OVF			BIT(5)
#define QSPI_TX_FIFO_UNF			BIT(6)
#define QSPI_TX_FIFO_OVF			BIT(7)
#define QSPI_ERR				BIT(8)
#define QSPI_TX_FIFO_FLUSH			BIT(14)
#define QSPI_RX_FIFO_FLUSH			BIT(15)
#define QSPI_TX_FIFO_EMPTY_COUNT(val)		(((val) >> 16) & 0x7f)
#define QSPI_RX_FIFO_FULL_COUNT(val)		(((val) >> 23) & 0x7f)

#define QSPI_FIFO_ERROR				(QSPI_RX_FIFO_UNF | \
						 QSPI_RX_FIFO_OVF | \
						 QSPI_TX_FIFO_UNF | \
						 QSPI_TX_FIFO_OVF)
#define QSPI_FIFO_EMPTY				(QSPI_RX_FIFO_EMPTY | \
						 QSPI_TX_FIFO_EMPTY)

#define QSPI_TX_DATA				0x018
#define QSPI_RX_DATA				0x01c

#define QSPI_DMA_CTL				0x020
#define QSPI_TX_TRIG(n)				(((n) & 0x3) << 15)
#define QSPI_TX_TRIG_1				QSPI_TX_TRIG(0)
#define QSPI_TX_TRIG_4				QSPI_TX_TRIG(1)
#define QSPI_TX_TRIG_8				QSPI_TX_TRIG(2)
#define QSPI_TX_TRIG_16				QSPI_TX_TRIG(3)

#define QSPI_RX_TRIG(n)				(((n) & 0x3) << 19)
#define QSPI_RX_TRIG_1				QSPI_RX_TRIG(0)
#define QSPI_RX_TRIG_4				QSPI_RX_TRIG(1)
#define QSPI_RX_TRIG_8				QSPI_RX_TRIG(2)
#define QSPI_RX_TRIG_16				QSPI_RX_TRIG(3)

#define QSPI_DMA_EN				BIT(31)

#define QSPI_DMA_BLK				0x024
#define QSPI_DMA_BLK_SET(x)			(((x) & 0xffff) << 0)

#define QSPI_TX_FIFO				0x108
#define QSPI_RX_FIFO				0x188

#define QSPI_FIFO_DEPTH				64

#define QSPI_INTR_MASK				0x18c
#define QSPI_INTR_RX_FIFO_UNF_MASK		BIT(25)
#define QSPI_INTR_RX_FIFO_OVF_MASK		BIT(26)
#define QSPI_INTR_TX_FIFO_UNF_MASK		BIT(27)
#define QSPI_INTR_TX_FIFO_OVF_MASK		BIT(28)
#define QSPI_INTR_RDY_MASK			BIT(29)
#define QSPI_INTR_RX_TX_FIFO_ERR		(QSPI_INTR_RX_FIFO_UNF_MASK | \
						 QSPI_INTR_RX_FIFO_OVF_MASK | \
						 QSPI_INTR_TX_FIFO_UNF_MASK | \
						 QSPI_INTR_TX_FIFO_OVF_MASK)

#define QSPI_MISC_REG				0x194
#define QSPI_NUM_DUMMY_CYCLE(x)			(((x) & 0xff) << 0)
#define QSPI_DUMMY_CYCLES_MAX			0xff

#define QSPI_CMB_SEQ_CMD			0x19c
#define QSPI_COMMAND_VALUE_SET(x)		(((x) & 0xFF) << 0)

#define QSPI_CMB_SEQ_CMD_CFG			0x1a0
#define QSPI_COMMAND_X1_X2_X4(x)		(((x) & 0x3) << 13)
#define QSPI_COMMAND_X1_X2_X4_MASK		(0x03 << 13)
#define QSPI_COMMAND_SDR_DDR			BIT(12)
#define QSPI_COMMAND_SIZE_SET(x)		(((x) & 0xFF) << 0)

#define QSPI_GLOBAL_CONFIG			0x1a4
#define QSPI_CMB_SEQ_EN				BIT(0)

#define QSPI_CMB_SEQ_ADDR			0x1a8
#define QSPI_ADDRESS_VALUE_SET(x)		(((x) & 0xFFFF) << 0)

#define QSPI_CMB_SEQ_ADDR_CFG			0x1ac
#define QSPI_ADDRESS_X1_X2_X4(x)		(((x) & 0x3) << 13)
#define QSPI_ADDRESS_X1_X2_X4_MASK		(0x03 << 13)
#define QSPI_ADDRESS_SDR_DDR			BIT(12)
#define QSPI_ADDRESS_SIZE_SET(x)		(((x) & 0xFF) << 0)

#define DATA_DIR_TX				BIT(0)
#define DATA_DIR_RX				BIT(1)

#define QSPI_DMA_TIMEOUT			(msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN		(64 * 1024)
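/* Phases of a combined-sequence transfer (see tegra_qspi_combined_seq_xfer()) */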
#define CMD_TRANSFER				0
#define ADDR_TRANSFER				1
#define DATA_TRANSFER				2

struct tegra_qspi_soc_data {
	bool has_dma;
	bool cmb_xfer_capable;
	unsigned int cs_count;
};

struct tegra_qspi_client_data {
	int tx_clk_tap_delay;
	int rx_clk_tap_delay;
};

struct tegra_qspi {
	struct device				*dev;
	struct spi_master			*master;
	/* lock to protect data accessed by irq */
	spinlock_t				lock;

	struct clk				*clk;
	void __iomem				*base;
	phys_addr_t				phys;
	unsigned int				irq;

	u32					cur_speed;
	unsigned int				cur_pos;
	unsigned int				words_per_32bit;
	unsigned int				bytes_per_word;
	unsigned int				curr_dma_words;
	unsigned int				cur_direction;

	unsigned int				cur_rx_pos;
	unsigned int				cur_tx_pos;

	unsigned int				dma_buf_size;
	unsigned int				max_buf_size;
	bool					is_curr_dma_xfer;

	struct completion			rx_dma_complete;
	struct completion			tx_dma_complete;

	u32					tx_status;
	u32					rx_status;
	u32					status_reg;
	bool					is_packed;
	bool					use_dma;

	u32					command1_reg;
	u32					dma_control_reg;
	u32					def_command1_reg;
	u32					def_command2_reg;
	u32					spi_cs_timing1;
	u32					spi_cs_timing2;
	u8					dummy_cycles;

	struct completion			xfer_completion;
	struct spi_transfer			*curr_xfer;

	struct dma_chan				*rx_dma_chan;
	u32					*rx_dma_buf;
	dma_addr_t				rx_dma_phys;
	struct dma_async_tx_descriptor		*rx_dma_desc;

	struct dma_chan				*tx_dma_chan;
	u32					*tx_dma_buf;
	dma_addr_t				tx_dma_phys;
	struct dma_async_tx_descriptor		*tx_dma_desc;
	const struct tegra_qspi_soc_data	*soc_data;
};

static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
{
	return readl(tqspi->base + offset);
}

static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
{
	writel(value, tqspi->base + offset);

	/* read back the register to make sure the write has completed */
	if (offset != QSPI_TX_FIFO)
		readl(tqspi->base + QSPI_COMMAND1);
}

static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
{
	u32 value;

	/* write 1 to clear status register */
	value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);

	value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	if (!(value & QSPI_INTR_RDY_MASK)) {
		value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
		tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
	}

	/* clear fifo status error if any */
	value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if (value & QSPI_ERR)
		tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
}

static unsigned int
tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int max_word, max_len, total_fifo_words;
	unsigned int remain_len = t->len - tqspi->cur_pos;
	unsigned int bits_per_word = t->bits_per_word;

	tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	/*
	 * The Tegra QSPI controller supports packed and unpacked mode
	 * transfers. Packed mode is used for transfers of 8, 16, or 32 bits
	 * per word with a length of at least one word; all other transfers
	 * use unpacked mode.
	 */
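	/*
	 * For example, a 10-byte transfer at 8 bits per word runs packed with
	 * four words per 32-bit FIFO entry, while a transfer at 24 bits per
	 * word runs unpacked with one word per FIFO entry.
	 */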

	if ((bits_per_word == 8 || bits_per_word == 16 ||
	     bits_per_word == 32) && t->len > 3) {
		tqspi->is_packed = true;
		tqspi->words_per_32bit = 32 / bits_per_word;
	} else {
		tqspi->is_packed = false;
		tqspi->words_per_32bit = 1;
	}

	if (tqspi->is_packed) {
		max_len = min(remain_len, tqspi->max_buf_size);
		tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
		total_fifo_words = (max_len + 3) / 4;
	} else {
		max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
		max_word = min(max_word, tqspi->max_buf_size / 4);
		tqspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}

	return total_fifo_words;
}

static unsigned int
tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int written_words, fifo_words_left, count;
	unsigned int len, tx_empty_count, max_n_32bit, i;
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u32 fifo_status;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tqspi->is_packed) {
		fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
		written_words = min(fifo_words_left, tqspi->curr_dma_words);
		len = written_words * tqspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(len, 4);
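		/* pack up to four bytes, little-endian, into each FIFO word */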
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; (i < 4) && len; i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
	} else {
		unsigned int write_bytes;
		u8 bytes_per_word = tqspi->bytes_per_word;

		max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		len = written_words * tqspi->bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		write_bytes = len;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	return written_words;
}

static unsigned int
tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len, rx_full_count, count, i;
	unsigned int read_words = 0;
	u32 fifo_status, x;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tqspi->is_packed) {
		len = tqspi->curr_dma_words * tqspi->bytes_per_word;
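		/* unpack each FIFO word into up to four little-endian bytes */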
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);

			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		read_words += tqspi->curr_dma_words;
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		u8 bytes_per_word = tqspi->bytes_per_word;
		unsigned int read_bytes;

		len = rx_full_count * bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		read_bytes = len;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		read_words += rx_full_count;
		tqspi->cur_rx_pos += read_bytes;
	}

	return read_words;
}

static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
				tqspi->dma_buf_size, DMA_TO_DEVICE);

	/*
	 * In packed mode, each FIFO word may contain multiple packets,
	 * depending on the bits per word, so every byte in a FIFO word is
	 * valid.
	 *
	 * In unpacked mode, each FIFO word contains a single packet; any
	 * remaining bits in the word, as determined by the bits per word,
	 * are ignored by the hardware as invalid.
	 */
	if (tqspi->is_packed) {
		tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
		unsigned int i, count, consume, write_bytes;

		/*
		 * Fill tx_dma_buf with a single packet per word, based on
		 * bits per word, from the SPI core tx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		write_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = 0;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tqspi->tx_dma_buf[count] = x;
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
				   tqspi->dma_buf_size, DMA_TO_DEVICE);
}

static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
				tqspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tqspi->is_packed) {
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		unsigned int i, count, consume, read_bytes;

		/*
		 * Each FIFO word contains a single data packet. Skip the
		 * invalid bits in each FIFO word, as determined by the bits
		 * per word, and byte-align the data while filling the SPI
		 * core rx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		read_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = tqspi->rx_dma_buf[count] & rx_mask;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		tqspi->cur_rx_pos += read_bytes;
	}

	dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
				   tqspi->dma_buf_size, DMA_FROM_DEVICE);
}

static void tegra_qspi_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}

static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t tx_dma_phys;

	reinit_completion(&tqspi->tx_dma_complete);

	if (tqspi->is_packed)
		tx_dma_phys = t->tx_dma;
	else
		tx_dma_phys = tqspi->tx_dma_phys;

	tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
							 len, DMA_MEM_TO_DEV,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->tx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get TX descriptor\n");
		return -EIO;
	}

	tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
	dmaengine_submit(tqspi->tx_dma_desc);
	dma_async_issue_pending(tqspi->tx_dma_chan);

	return 0;
}

static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t rx_dma_phys;

	reinit_completion(&tqspi->rx_dma_complete);

	if (tqspi->is_packed)
		rx_dma_phys = t->rx_dma;
	else
		rx_dma_phys = tqspi->rx_dma_phys;

	tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
							 len, DMA_DEV_TO_MEM,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->rx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get RX descriptor\n");
		return -EIO;
	}

	tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
	dmaengine_submit(tqspi->rx_dma_desc);
	dma_async_issue_pending(tqspi->rx_dma_chan);

	return 0;
}

static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
{
	void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
	u32 val;

	val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
		return 0;

	val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
	tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);

	if (!atomic)
		return readl_relaxed_poll_timeout(addr, val,
						  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						  1000, 1000000);

	return readl_relaxed_poll_timeout_atomic(addr, val,
						 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						 1000, 1000000);
}

static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
{
	u32 intr_mask;

	intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
	tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
}

static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	if (t->tx_buf) {
		t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->tx_dma))
			return -ENOMEM;
	}

	if (t->rx_buf) {
		t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
			dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
	dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}

static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	struct dma_slave_config dma_sconfig = { 0 };
	unsigned int len;
	u8 dma_burst;
	int ret = 0;
	u32 val;

	if (tqspi->is_packed) {
		ret = tegra_qspi_dma_map_xfer(tqspi, t);
		if (ret < 0)
			return ret;
	}

	val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
	tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(tqspi);

	if (tqspi->is_packed)
		len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
	else
		len = tqspi->curr_dma_words * 4;

	/* set the DMA trigger level based on the transfer length */
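	/*
	 * Lengths that are not a multiple of 16 bytes fall back to the
	 * single-unit trigger, odd multiples of 16 bytes use the 4-word
	 * trigger, and multiples of 32 bytes use the 8-word trigger, so the
	 * DMA burst always divides the transfer evenly.
	 */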
	val = 0;
	if (len & 0xf) {
		val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
		dma_burst = 1;
	} else if ((len >> 4) & 0x1) {
		val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
		dma_burst = 4;
	} else {
		val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
		dma_burst = 8;
	}

	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
	tqspi->dma_control_reg = val;

	dma_sconfig.device_fc = true;
	if (tqspi->cur_direction & DATA_DIR_TX) {
		dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.dst_maxburst = dma_burst;
		ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
			return ret;
		}

		tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
		ret = tegra_qspi_start_tx_dma(tqspi, t, len);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
			return ret;
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.src_maxburst = dma_burst;
		ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
			return ret;
		}

		dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
					   tqspi->dma_buf_size,
					   DMA_FROM_DEVICE);

		ret = tegra_qspi_start_rx_dma(tqspi, t, len);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
			if (tqspi->cur_direction & DATA_DIR_TX)
				dmaengine_terminate_all(tqspi->tx_dma_chan);
			return ret;
		}
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);

	tqspi->is_curr_dma_xfer = true;
	tqspi->dma_control_reg = val;
	val |= QSPI_DMA_EN;
	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);

	return ret;
}

static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
{
	u32 val;
	unsigned int cur_words;

	if (qspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
	else
		cur_words = qspi->curr_dma_words;

	val = QSPI_DMA_BLK_SET(cur_words - 1);
	tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(qspi);

	qspi->is_curr_dma_xfer = false;
	val = qspi->command1_reg;
	val |= QSPI_PIO;
	tegra_qspi_writel(qspi, val, QSPI_COMMAND1);

	return 0;
}

static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
	if (!tqspi->soc_data->has_dma)
		return;

	if (tqspi->tx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
		tqspi->tx_dma_buf = NULL;
	}

	if (tqspi->tx_dma_chan) {
		dma_release_channel(tqspi->tx_dma_chan);
		tqspi->tx_dma_chan = NULL;
	}

	if (tqspi->rx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
		tqspi->rx_dma_buf = NULL;
	}

	if (tqspi->rx_dma_chan) {
		dma_release_channel(tqspi->rx_dma_chan);
		tqspi->rx_dma_chan = NULL;
	}
}

static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
{
	struct dma_chan *dma_chan;
	dma_addr_t dma_phys;
	u32 *dma_buf;
	int err;

	if (!tqspi->soc_data->has_dma)
		return 0;

	dma_chan = dma_request_chan(tqspi->dev, "rx");
	if (IS_ERR(dma_chan)) {
		err = PTR_ERR(dma_chan);
		goto err_out;
	}

	tqspi->rx_dma_chan = dma_chan;

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->rx_dma_buf = dma_buf;
	tqspi->rx_dma_phys = dma_phys;

	dma_chan = dma_request_chan(tqspi->dev, "tx");
	if (IS_ERR(dma_chan)) {
		err = PTR_ERR(dma_chan);
		goto err_out;
	}

	tqspi->tx_dma_chan = dma_chan;

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->tx_dma_buf = dma_buf;
	tqspi->tx_dma_phys = dma_phys;
	tqspi->use_dma = true;

	return 0;

err_out:
	tegra_qspi_deinit_dma(tqspi);

	if (err != -EPROBE_DEFER) {
		dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
		dev_err(tqspi->dev, "falling back to PIO\n");
		return 0;
	}

	return err;
}

static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
					 bool is_first_of_msg)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	u32 command1, command2, speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 tx_tap = 0, rx_tap = 0;
	int req_mode;

	if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
		clk_set_rate(tqspi->clk, speed);
		tqspi->cur_speed = speed;
	}

	tqspi->cur_pos = 0;
	tqspi->cur_rx_pos = 0;
	tqspi->cur_tx_pos = 0;
	tqspi->curr_xfer = t;

	if (is_first_of_msg) {
		tegra_qspi_mask_clear_irq(tqspi);

		command1 = tqspi->def_command1_reg;
		command1 |= QSPI_CS_SEL(spi->chip_select);
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);

		command1 &= ~QSPI_CONTROL_MODE_MASK;
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_3)
			command1 |= QSPI_CONTROL_MODE_3;
		else
			command1 |= QSPI_CONTROL_MODE_0;

		if (spi->mode & SPI_CS_HIGH)
			command1 |= QSPI_CS_SW_VAL;
		else
			command1 &= ~QSPI_CS_SW_VAL;
		tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

		if (cdata && cdata->tx_clk_tap_delay)
			tx_tap = cdata->tx_clk_tap_delay;

		if (cdata && cdata->rx_clk_tap_delay)
			rx_tap = cdata->rx_clk_tap_delay;

		command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
		if (command2 != tqspi->def_command2_reg)
			tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);

	} else {
		command1 = tqspi->command1_reg;
		command1 &= ~QSPI_BIT_LENGTH(~0);
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
	}

	command1 &= ~QSPI_SDR_DDR_SEL;

	return command1;
}

static int tegra_qspi_start_transfer_one(struct spi_device *spi,
					 struct spi_transfer *t, u32 command1)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	unsigned int total_fifo_words;
	u8 bus_width = 0;
	int ret;

	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);

	command1 &= ~QSPI_PACKED;
	if (tqspi->is_packed)
		command1 |= QSPI_PACKED;
	tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

	tqspi->cur_direction = 0;

	command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
	if (t->rx_buf) {
		command1 |= QSPI_RX_EN;
		tqspi->cur_direction |= DATA_DIR_RX;
		bus_width = t->rx_nbits;
	}

	if (t->tx_buf) {
		command1 |= QSPI_TX_EN;
		tqspi->cur_direction |= DATA_DIR_TX;
		bus_width = t->tx_nbits;
	}

	command1 &= ~QSPI_INTERFACE_WIDTH_MASK;

	if (bus_width == SPI_NBITS_QUAD)
		command1 |= QSPI_INTERFACE_WIDTH_QUAD;
	else if (bus_width == SPI_NBITS_DUAL)
		command1 |= QSPI_INTERFACE_WIDTH_DUAL;
	else
		command1 |= QSPI_INTERFACE_WIDTH_SINGLE;

	tqspi->command1_reg = command1;

	tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);

	ret = tegra_qspi_flush_fifos(tqspi, false);
	if (ret < 0)
		return ret;

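	/* transfers that fit in the 64-word FIFO run PIO; larger ones use DMA */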
	if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
		ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);

	return ret;
}

static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
	struct tegra_qspi_client_data *cdata;
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);

	cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
	if (!cdata)
		return NULL;

	device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
				 &cdata->tx_clk_tap_delay);
	device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
				 &cdata->rx_clk_tap_delay);

	return cdata;
}

static int tegra_qspi_setup(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	unsigned long flags;
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(tqspi->dev);
	if (ret < 0) {
		dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	if (!cdata) {
		cdata = tegra_qspi_parse_cdata_dt(spi);
		spi->controller_data = cdata;
	}
	spin_lock_irqsave(&tqspi->lock, flags);

	/* keep the default CS state inactive */
	val = tqspi->def_command1_reg;
	val |= QSPI_CS_SEL(spi->chip_select);
	if (spi->mode & SPI_CS_HIGH)
		val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select);
	else
		val |= QSPI_CS_POL_INACTIVE(spi->chip_select);

	tqspi->def_command1_reg = val;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);

	spin_unlock_irqrestore(&tqspi->lock, flags);

	pm_runtime_put(tqspi->dev);

	return 0;
}

static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
{
	dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
	dev_dbg(tqspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_COMMAND1),
		tegra_qspi_readl(tqspi, QSPI_COMMAND2));
	dev_dbg(tqspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
		tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
	dev_dbg(tqspi->dev, "INTR_MASK:   0x%08x | MISC:        0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
		tegra_qspi_readl(tqspi, QSPI_MISC_REG));
	dev_dbg(tqspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
}

static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
{
	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
	tegra_qspi_dump_regs(tqspi);
	tegra_qspi_flush_fifos(tqspi, true);
	if (device_reset(tqspi->dev) < 0)
		dev_warn_once(tqspi->dev, "device reset failed\n");
}

static void tegra_qspi_transfer_end(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;

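	/* drive software CS to its inactive level, then restore the default state */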
	if (cs_val)
		tqspi->command1_reg |= QSPI_CS_SW_VAL;
	else
		tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}

static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
{
	u32 cmd_config = 0;

	/* build the command phase configuration */
	if (is_ddr)
		cmd_config |= QSPI_COMMAND_SDR_DDR;
	else
		cmd_config &= ~QSPI_COMMAND_SDR_DDR;

	cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
	cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);

	return cmd_config;
}

static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
{
	u32 addr_config = 0;

	/* build the address phase configuration */
	is_ddr = false;		/* only SDR mode is supported */
	bus_width = 0;		/* only X1 mode is supported */

	if (is_ddr)
		addr_config |= QSPI_ADDRESS_SDR_DDR;
	else
		addr_config &= ~QSPI_ADDRESS_SDR_DDR;

	addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
	addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);

	return addr_config;
}

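/*
 * Combined sequence mode issues one flash operation as three back-to-back
 * transfers: a command phase, an address phase and a data phase. The command
 * and address values and widths are latched into the QSPI_CMB_SEQ_* registers
 * and emitted by the controller around the data transfer.
 */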
static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
					struct spi_message *msg)
{
	bool is_first_msg = true;
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	u8 transfer_phase = 0;
	u32 cmd1 = 0, dma_ctl = 0;
	int ret = 0;
	u32 address_value = 0;
	u32 cmd_config = 0, addr_config = 0;
	u8 cmd_value = 0, val = 0;

	/* Enable Combined sequence mode */
	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
	val |= QSPI_CMB_SEQ_EN;
	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
	/* Process individual transfer list */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		switch (transfer_phase) {
		case CMD_TRANSFER:
			/* X1 SDR mode */
			cmd_config = tegra_qspi_cmd_config(false, 0,
							   xfer->len);
			cmd_value = *((const u8 *)(xfer->tx_buf));
			break;
		case ADDR_TRANSFER:
			/* X1 SDR mode */
			addr_config = tegra_qspi_addr_config(false, 0,
							     xfer->len);
			address_value = *((const u32 *)(xfer->tx_buf));
			break;
		case DATA_TRANSFER:
			/* Program Command, Address value in register */
			tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
			tegra_qspi_writel(tqspi, address_value,
					  QSPI_CMB_SEQ_ADDR);
			/* Program Command and Address config in register */
			tegra_qspi_writel(tqspi, cmd_config,
					  QSPI_CMB_SEQ_CMD_CFG);
			tegra_qspi_writel(tqspi, addr_config,
					  QSPI_CMB_SEQ_ADDR_CFG);

			reinit_completion(&tqspi->xfer_completion);
			cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
							     is_first_msg);
			ret = tegra_qspi_start_transfer_one(spi, xfer,
							    cmd1);

			if (ret < 0) {
				dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
					ret);
				return ret;
			}

			is_first_msg = false;
			ret = wait_for_completion_timeout(&tqspi->xfer_completion,
							  QSPI_DMA_TIMEOUT);

			if (WARN_ON(ret == 0)) {
				dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
					ret);
				if (tqspi->is_curr_dma_xfer &&
				    (tqspi->cur_direction & DATA_DIR_TX))
					dmaengine_terminate_all(tqspi->tx_dma_chan);

				if (tqspi->is_curr_dma_xfer &&
				    (tqspi->cur_direction & DATA_DIR_RX))
					dmaengine_terminate_all(tqspi->rx_dma_chan);

				/* Abort transfer by resetting PIO/DMA bit */
				if (!tqspi->is_curr_dma_xfer) {
					cmd1 = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
					cmd1 &= ~QSPI_PIO;
					tegra_qspi_writel(tqspi, cmd1, QSPI_COMMAND1);
				} else {
					dma_ctl = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
					dma_ctl &= ~QSPI_DMA_EN;
					tegra_qspi_writel(tqspi, dma_ctl, QSPI_DMA_CTL);
				}

				/* Reset controller if timeout happens */
				if (device_reset(tqspi->dev) < 0)
					dev_warn_once(tqspi->dev,
						      "device reset failed\n");
				ret = -EIO;
				goto exit;
			}

			if (tqspi->tx_status || tqspi->rx_status) {
				dev_err(tqspi->dev, "QSPI Transfer failed\n");
				tqspi->tx_status = 0;
				tqspi->rx_status = 0;
				ret = -EIO;
				goto exit;
			}
			break;
		default:
			ret = -EINVAL;
			goto exit;
		}
		msg->actual_length += xfer->len;
		transfer_phase++;
	}
	if (!xfer->cs_change) {
		tegra_qspi_transfer_end(spi);
		spi_transfer_delay_exec(xfer);
	}
	ret = 0;

exit:
	msg->status = ret;

	return ret;
}

static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
					    struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *transfer;
	bool is_first_msg = true;
	int ret = 0, val = 0;

	msg->status = 0;
	msg->actual_length = 0;
	tqspi->tx_status = 0;
	tqspi->rx_status = 0;

	/* Disable Combined sequence mode */
	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
	val &= ~QSPI_CMB_SEQ_EN;
	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		struct spi_transfer *xfer = transfer;
		u8 dummy_bytes = 0;
		u32 cmd1;

		tqspi->dummy_cycles = 0;
		/*
		 * Tegra QSPI hardware supports sending dummy bytes after the
		 * actual transfer bytes, based on the dummy clock cycles
		 * programmed in the QSPI_MISC register. So check whether the
		 * next transfer is a dummy-data transfer; if so, program its
		 * dummy clock cycles along with the current transfer and
		 * skip that transfer.
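		 *
		 * For example, an 8-byte dummy transfer at quad width
		 * (tx_nbits = 4) maps to 8 * 8 / 4 = 16 dummy clock cycles.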
		 */
		if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
			struct spi_transfer *next_xfer;

			next_xfer = list_next_entry(xfer, transfer_list);
			if (next_xfer->dummy_data) {
				u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;

				if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
					tqspi->dummy_cycles = dummy_cycles;
					dummy_bytes = next_xfer->len;
					transfer = next_xfer;
				}
			}
		}

		reinit_completion(&tqspi->xfer_completion);

		cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);

		ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
			goto complete_xfer;
		}

		ret = wait_for_completion_timeout(&tqspi->xfer_completion,
						  QSPI_DMA_TIMEOUT);
		if (WARN_ON(ret == 0)) {
			dev_err(tqspi->dev, "transfer timeout\n");
			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
				dmaengine_terminate_all(tqspi->tx_dma_chan);
			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
				dmaengine_terminate_all(tqspi->rx_dma_chan);
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		if (tqspi->tx_status || tqspi->rx_status) {
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		msg->actual_length += xfer->len + dummy_bytes;

complete_xfer:
		if (ret < 0) {
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
			goto exit;
		}

		if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
			/* de-activate CS after last transfer only when cs_change is not set */
			if (!xfer->cs_change) {
				tegra_qspi_transfer_end(spi);
				spi_transfer_delay_exec(xfer);
			}
		} else if (xfer->cs_change) {
			/* de-activate CS between transfers only when cs_change is set */
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
		}
	}

	ret = 0;
exit:
	msg->status = ret;

	return ret;
}

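/*
 * A message qualifies for combined sequence mode only when it consists of
 * exactly three transfers (command, address, data), the command is at most
 * 2 bytes, the address is 3 or 4 bytes, and the data phase is DMA-backed
 * and no longer than the FIFO depth in bytes (64 words * 4).
 */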
static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
					struct spi_message *msg)
{
	int transfer_count = 0;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		transfer_count++;
	}
	if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
		return false;
	xfer = list_first_entry(&msg->transfers, typeof(*xfer),
				transfer_list);
	if (xfer->len > 2)
		return false;
	xfer = list_next_entry(xfer, transfer_list);
	if (xfer->len > 4 || xfer->len < 3)
		return false;
	xfer = list_next_entry(xfer, transfer_list);
	if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
		return false;

	return true;
}

static int tegra_qspi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
	int ret;

	if (tegra_qspi_validate_cmb_seq(tqspi, msg))
		ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
	else
		ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);

	spi_finalize_current_message(master);

	return ret;
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned long flags;

	spin_lock_irqsave(&tqspi->lock, flags);

	if (tqspi->tx_status || tqspi->rx_status) {
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned int total_fifo_words;
	unsigned long flags;
	long wait_status;
	int err = 0;

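	/*
	 * err records a TX failure as 1 and an RX failure as 2; any non-zero
	 * value aborts the transfer below.
	 */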
	if (tqspi->cur_direction & DATA_DIR_TX) {
		if (tqspi->tx_status) {
			dmaengine_terminate_all(tqspi->tx_dma_chan);
			err += 1;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->tx_dma_chan);
				dev_err(tqspi->dev, "failed TX DMA transfer\n");
				err += 1;
			}
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		if (tqspi->rx_status) {
			dmaengine_terminate_all(tqspi->rx_dma_chan);
			err += 2;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->rx_dma_chan);
				dev_err(tqspi->dev, "failed RX DMA transfer\n");
				err += 2;
			}
		}
	}

	spin_lock_irqsave(&tqspi->lock, flags);

	if (err) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_dma_unmap_xfer(tqspi, t);

	/* continue transfer in current message */
	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	if (total_fifo_words > QSPI_FIFO_DEPTH)
		err = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		err = tegra_qspi_start_cpu_based_transfer(tqspi, t);

exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
	struct tegra_qspi *tqspi = context_data;

	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);

	if (tqspi->cur_direction & DATA_DIR_RX)
		tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);

	tegra_qspi_mask_clear_irq(tqspi);

	if (!tqspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tqspi);

	return handle_dma_based_xfer(tqspi);
}

static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
	.has_dma = true,
	.cmb_xfer_capable = false,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
	.has_dma = true,
	.cmb_xfer_capable = true,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
	.has_dma = false,
	.cmb_xfer_capable = true,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
	.has_dma = false,
	.cmb_xfer_capable = true,
	.cs_count = 4,
};

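/*
 * Illustrative (not board-accurate) device tree node for this driver; the
 * clock name "qspi" and the DMA names "rx"/"tx" match the lookups in this
 * file, while the unit address, register range and phandles are
 * placeholders:
 *
 *	spi@3270000 {
 *		compatible = "nvidia,tegra186-qspi";
 *		reg = <0x3270000 0x1000>;
 *		interrupts = <...>;
 *		clocks = <&clk ...>;
 *		clock-names = "qspi";
 *		dmas = <&dma ...>, <&dma ...>;
 *		dma-names = "rx", "tx";
 *	};
 */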
static const struct of_device_id tegra_qspi_of_match[] = {
	{
		.compatible = "nvidia,tegra210-qspi",
		.data	    = &tegra210_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra186-qspi",
		.data	    = &tegra186_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra194-qspi",
		.data	    = &tegra186_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra234-qspi",
		.data	    = &tegra234_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra241-qspi",
		.data	    = &tegra241_qspi_soc_data,
	},
	{}
};

MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id tegra_qspi_acpi_match[] = {
	{
		.id = "NVDA1213",
		.driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
	}, {
		.id = "NVDA1313",
		.driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
	}, {
		.id = "NVDA1413",
		.driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
	}, {
		.id = "NVDA1513",
		.driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
	},
	{}
};

MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
#endif

static int tegra_qspi_probe(struct platform_device *pdev)
{
	struct spi_master	*master;
	struct tegra_qspi	*tqspi;
	struct resource		*r;
	int ret, qspi_irq;
	int bus_num;

	master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);
	tqspi = spi_master_get_devdata(master);

	master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
			    SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
	master->setup = tegra_qspi_setup;
	master->transfer_one_message = tegra_qspi_transfer_one_message;
	master->num_chipselect = 1;
	master->auto_runtime_pm = true;

	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
	if (bus_num >= 0)
		master->bus_num = bus_num;

	tqspi->master = master;
	tqspi->dev = &pdev->dev;
	spin_lock_init(&tqspi->lock);

	tqspi->soc_data = device_get_match_data(&pdev->dev);
	master->num_chipselect = tqspi->soc_data->cs_count;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tqspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(tqspi->base))
		return PTR_ERR(tqspi->base);

	tqspi->phys = r->start;
	qspi_irq = platform_get_irq(pdev, 0);
	if (qspi_irq < 0)
		return qspi_irq;
	tqspi->irq = qspi_irq;

	if (!has_acpi_companion(tqspi->dev)) {
		tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
		if (IS_ERR(tqspi->clk)) {
			ret = PTR_ERR(tqspi->clk);
			dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
			return ret;
		}
	}

	tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
	tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;

	ret = tegra_qspi_init_dma(tqspi);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma)
		tqspi->max_buf_size = tqspi->dma_buf_size;

	init_completion(&tqspi->tx_dma_complete);
	init_completion(&tqspi->rx_dma_complete);
	init_completion(&tqspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
		goto exit_pm_disable;
	}

	if (device_reset(tqspi->dev) < 0)
		dev_warn_once(tqspi->dev, "device reset failed\n");

	tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
	tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
	tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
	tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);

	pm_runtime_put(&pdev->dev);

	ret = request_threaded_irq(tqspi->irq, NULL,
				   tegra_qspi_isr_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), tqspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
		goto exit_pm_disable;
	}

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_register_master(master);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register master: %d\n", ret);
		goto exit_free_irq;
	}

	return 0;

exit_free_irq:
	free_irq(qspi_irq, tqspi);
exit_pm_disable:
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);
	return ret;
}

static int tegra_qspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);

	spi_unregister_master(master);
	free_irq(tqspi->irq, tqspi);
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);

	return 0;
}

static int __maybe_unused tegra_qspi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int __maybe_unused tegra_qspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}

static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);

	/* runtime PM is disabled with ACPI */
	if (has_acpi_companion(tqspi->dev))
		return 0;
	/* flush all writes still in the PPSB queue by reading back */
	tegra_qspi_readl(tqspi, QSPI_COMMAND1);

	clk_disable_unprepare(tqspi->clk);

	return 0;
}

static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_master_get_devdata(master);
	int ret;

	/* runtime PM is disabled with ACPI */
	if (has_acpi_companion(tqspi->dev))
		return 0;
	ret = clk_prepare_enable(tqspi->clk);
	if (ret < 0)
		dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);

	return ret;
}

static const struct dev_pm_ops tegra_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
};

static struct platform_driver tegra_qspi_driver = {
	.driver = {
		.name		= "tegra-qspi",
		.pm		= &tegra_qspi_pm_ops,
		.of_match_table	= tegra_qspi_of_match,
		.acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
	},
	.probe =	tegra_qspi_probe,
	.remove =	tegra_qspi_remove,
};
module_platform_driver(tegra_qspi_driver);

MODULE_ALIAS("platform:qspi-tegra");
MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
MODULE_LICENSE("GPL v2");