Linux Audio

Check our new training course

Real-Time Linux with PREEMPT_RT training

Feb 18-20, 2025
Register
Loading...
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
 
 
 
 
 
 
 
 
 
   4 */
   5
   6#include <linux/clk.h>
   7#include <linux/delay.h>
   8#include <linux/err.h>
   9#include <linux/interrupt.h>
  10#include <linux/io.h>
  11#include <linux/list.h>
  12#include <linux/module.h>
  13#include <linux/of.h>
  14#include <linux/of_device.h>
  15#include <linux/platform_device.h>
  16#include <linux/pm_runtime.h>
  17#include <linux/spi/spi.h>
  18#include <linux/dmaengine.h>
  19#include <linux/dma-mapping.h>
  20
  21#define QUP_CONFIG			0x0000
  22#define QUP_STATE			0x0004
  23#define QUP_IO_M_MODES			0x0008
  24#define QUP_SW_RESET			0x000c
  25#define QUP_OPERATIONAL			0x0018
  26#define QUP_ERROR_FLAGS			0x001c
  27#define QUP_ERROR_FLAGS_EN		0x0020
  28#define QUP_OPERATIONAL_MASK		0x0028
  29#define QUP_HW_VERSION			0x0030
  30#define QUP_MX_OUTPUT_CNT		0x0100
  31#define QUP_OUTPUT_FIFO			0x0110
  32#define QUP_MX_WRITE_CNT		0x0150
  33#define QUP_MX_INPUT_CNT		0x0200
  34#define QUP_MX_READ_CNT			0x0208
  35#define QUP_INPUT_FIFO			0x0218
  36
  37#define SPI_CONFIG			0x0300
  38#define SPI_IO_CONTROL			0x0304
  39#define SPI_ERROR_FLAGS			0x0308
  40#define SPI_ERROR_FLAGS_EN		0x030c
  41
  42/* QUP_CONFIG fields */
  43#define QUP_CONFIG_SPI_MODE		(1 << 8)
  44#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
  45#define QUP_CONFIG_NO_INPUT		BIT(7)
  46#define QUP_CONFIG_NO_OUTPUT		BIT(6)
  47#define QUP_CONFIG_N			0x001f
  48
  49/* QUP_STATE fields */
  50#define QUP_STATE_VALID			BIT(2)
  51#define QUP_STATE_RESET			0
  52#define QUP_STATE_RUN			1
  53#define QUP_STATE_PAUSE			3
  54#define QUP_STATE_MASK			3
  55#define QUP_STATE_CLEAR			2
  56
  57#define QUP_HW_VERSION_2_1_1		0x20010001
  58
  59/* QUP_IO_M_MODES fields */
  60#define QUP_IO_M_PACK_EN		BIT(15)
  61#define QUP_IO_M_UNPACK_EN		BIT(14)
  62#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
  63#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
  64#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
  65#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
  66
  67#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
  68#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
  69#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
  70#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)
  71
  72#define QUP_IO_M_MODE_FIFO		0
  73#define QUP_IO_M_MODE_BLOCK		1
  74#define QUP_IO_M_MODE_DMOV		2
  75#define QUP_IO_M_MODE_BAM		3
  76
  77/* QUP_OPERATIONAL fields */
  78#define QUP_OP_IN_BLOCK_READ_REQ	BIT(13)
  79#define QUP_OP_OUT_BLOCK_WRITE_REQ	BIT(12)
  80#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
  81#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
  82#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
  83#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
  84#define QUP_OP_IN_FIFO_FULL		BIT(7)
  85#define QUP_OP_OUT_FIFO_FULL		BIT(6)
  86#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
  87#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)
  88
  89/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
  90#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
  91#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
  92#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
  93#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)
  94
  95/* SPI_CONFIG fields */
  96#define SPI_CONFIG_HS_MODE		BIT(10)
  97#define SPI_CONFIG_INPUT_FIRST		BIT(9)
  98#define SPI_CONFIG_LOOPBACK		BIT(8)
  99
 100/* SPI_IO_CONTROL fields */
 101#define SPI_IO_C_FORCE_CS		BIT(11)
 102#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
 103#define SPI_IO_C_MX_CS_MODE		BIT(8)
 104#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
 105#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
 106#define SPI_IO_C_CS_SELECT_MASK		0x000c
 107#define SPI_IO_C_TRISTATE_CS		BIT(1)
 108#define SPI_IO_C_NO_TRI_STATE		BIT(0)
 109
 110/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
 111#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
 112#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)
 113
 114#define SPI_NUM_CHIPSELECTS		4
 115
 116#define SPI_MAX_XFER			(SZ_64K - 64)
 117
 118/* high speed mode is when bus rate is greater then 26MHz */
 119#define SPI_HS_MIN_RATE			26000000
 120#define SPI_MAX_RATE			50000000
 121
 122#define SPI_DELAY_THRESHOLD		1
 123#define SPI_DELAY_RETRY			10
 124
 125struct spi_qup {
 126	void __iomem		*base;
 127	struct device		*dev;
 128	struct clk		*cclk;	/* core clock */
 129	struct clk		*iclk;	/* interface clock */
 130	int			irq;
 131	spinlock_t		lock;
 132
 133	int			in_fifo_sz;
 134	int			out_fifo_sz;
 135	int			in_blk_sz;
 136	int			out_blk_sz;
 137
 138	struct spi_transfer	*xfer;
 139	struct completion	done;
 140	int			error;
 141	int			w_size;	/* bytes per SPI word */
 142	int			n_words;
 143	int			tx_bytes;
 144	int			rx_bytes;
 145	const u8		*tx_buf;
 146	u8			*rx_buf;
 147	int			qup_v1;
 148
 149	int			mode;
 150	struct dma_slave_config	rx_conf;
 151	struct dma_slave_config	tx_conf;
 152};
 153
 154static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
 155
 156static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
 157{
 158	u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
 159
 160	return (opflag & flag) != 0;
 161}
 162
 163static inline bool spi_qup_is_dma_xfer(int mode)
 164{
 165	if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
 166		return true;
 167
 168	return false;
 169}
 170
 171/* get's the transaction size length */
 172static inline unsigned int spi_qup_len(struct spi_qup *controller)
 173{
 174	return controller->n_words * controller->w_size;
 175}
 176
 177static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
 178{
 179	u32 opstate = readl_relaxed(controller->base + QUP_STATE);
 180
 181	return opstate & QUP_STATE_VALID;
 182}
 183
 184static int spi_qup_set_state(struct spi_qup *controller, u32 state)
 185{
 186	unsigned long loop;
 187	u32 cur_state;
 188
 189	loop = 0;
 190	while (!spi_qup_is_valid_state(controller)) {
 191
 192		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
 193
 194		if (++loop > SPI_DELAY_RETRY)
 195			return -EIO;
 196	}
 197
 198	if (loop)
 199		dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
 200			loop, state);
 201
 202	cur_state = readl_relaxed(controller->base + QUP_STATE);
 203	/*
 204	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
 205	 * of (b10) are required
 206	 */
 207	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
 208	    (state == QUP_STATE_RESET)) {
 209		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
 210		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
 211	} else {
 212		cur_state &= ~QUP_STATE_MASK;
 213		cur_state |= state;
 214		writel_relaxed(cur_state, controller->base + QUP_STATE);
 215	}
 216
 217	loop = 0;
 218	while (!spi_qup_is_valid_state(controller)) {
 219
 220		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
 221
 222		if (++loop > SPI_DELAY_RETRY)
 223			return -EIO;
 224	}
 225
 226	return 0;
 227}
 228
 229static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
 230{
 231	u8 *rx_buf = controller->rx_buf;
 232	int i, shift, num_bytes;
 233	u32 word;
 234
 235	for (; num_words; num_words--) {
 236
 237		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
 238
 239		num_bytes = min_t(int, spi_qup_len(controller) -
 240				       controller->rx_bytes,
 241				       controller->w_size);
 242
 243		if (!rx_buf) {
 244			controller->rx_bytes += num_bytes;
 245			continue;
 246		}
 247
 248		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
 249			/*
 250			 * The data format depends on bytes per SPI word:
 251			 *  4 bytes: 0x12345678
 252			 *  2 bytes: 0x00001234
 253			 *  1 byte : 0x00000012
 254			 */
 255			shift = BITS_PER_BYTE;
 256			shift *= (controller->w_size - i - 1);
 257			rx_buf[controller->rx_bytes] = word >> shift;
 258		}
 259	}
 260}
 261
 262static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
 263{
 264	u32 remainder, words_per_block, num_words;
 265	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
 266
 267	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
 268				 controller->w_size);
 269	words_per_block = controller->in_blk_sz >> 2;
 270
 271	do {
 272		/* ACK by clearing service flag */
 273		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
 274			       controller->base + QUP_OPERATIONAL);
 275
 276		if (!remainder)
 277			goto exit;
 278
 279		if (is_block_mode) {
 280			num_words = (remainder > words_per_block) ?
 281					words_per_block : remainder;
 282		} else {
 283			if (!spi_qup_is_flag_set(controller,
 284						 QUP_OP_IN_FIFO_NOT_EMPTY))
 285				break;
 286
 287			num_words = 1;
 288		}
 289
 290		/* read up to the maximum transfer size available */
 291		spi_qup_read_from_fifo(controller, num_words);
 292
 293		remainder -= num_words;
 294
 295		/* if block mode, check to see if next block is available */
 296		if (is_block_mode && !spi_qup_is_flag_set(controller,
 297					QUP_OP_IN_BLOCK_READ_REQ))
 298			break;
 299
 300	} while (remainder);
 301
 302	/*
 303	 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
 304	 * reads, it has to be cleared again at the very end.  However, be sure
 305	 * to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
 306	 * present and this is used to determine if transaction is complete
 307	 */
 308exit:
 309	if (!remainder) {
 310		*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
 311		if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
 312			writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
 313				       controller->base + QUP_OPERATIONAL);
 314	}
 315}
 316
 317static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
 318{
 319	const u8 *tx_buf = controller->tx_buf;
 320	int i, num_bytes;
 321	u32 word, data;
 322
 323	for (; num_words; num_words--) {
 324		word = 0;
 325
 326		num_bytes = min_t(int, spi_qup_len(controller) -
 327				       controller->tx_bytes,
 328				       controller->w_size);
 329		if (tx_buf)
 330			for (i = 0; i < num_bytes; i++) {
 331				data = tx_buf[controller->tx_bytes + i];
 332				word |= data << (BITS_PER_BYTE * (3 - i));
 333			}
 334
 335		controller->tx_bytes += num_bytes;
 336
 337		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
 338	}
 339}
 340
 341static void spi_qup_dma_done(void *data)
 342{
 343	struct spi_qup *qup = data;
 344
 345	complete(&qup->done);
 346}
 347
 348static void spi_qup_write(struct spi_qup *controller)
 349{
 350	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
 351	u32 remainder, words_per_block, num_words;
 352
 353	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
 354				 controller->w_size);
 355	words_per_block = controller->out_blk_sz >> 2;
 356
 357	do {
 358		/* ACK by clearing service flag */
 359		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
 360			       controller->base + QUP_OPERATIONAL);
 361
 362		/* make sure the interrupt is valid */
 363		if (!remainder)
 364			return;
 365
 366		if (is_block_mode) {
 367			num_words = (remainder > words_per_block) ?
 368				words_per_block : remainder;
 369		} else {
 370			if (spi_qup_is_flag_set(controller,
 371						QUP_OP_OUT_FIFO_FULL))
 372				break;
 373
 374			num_words = 1;
 375		}
 376
 377		spi_qup_write_to_fifo(controller, num_words);
 378
 379		remainder -= num_words;
 380
 381		/* if block mode, check to see if next block is available */
 382		if (is_block_mode && !spi_qup_is_flag_set(controller,
 383					QUP_OP_OUT_BLOCK_WRITE_REQ))
 384			break;
 385
 386	} while (remainder);
 387}
 388
 389static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
 390			   unsigned int nents, enum dma_transfer_direction dir,
 391			   dma_async_tx_callback callback)
 392{
 393	struct spi_qup *qup = spi_master_get_devdata(master);
 394	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
 395	struct dma_async_tx_descriptor *desc;
 396	struct dma_chan *chan;
 397	dma_cookie_t cookie;
 398
 399	if (dir == DMA_MEM_TO_DEV)
 400		chan = master->dma_tx;
 401	else
 402		chan = master->dma_rx;
 403
 404	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
 405	if (IS_ERR_OR_NULL(desc))
 406		return desc ? PTR_ERR(desc) : -EINVAL;
 407
 408	desc->callback = callback;
 409	desc->callback_param = qup;
 410
 411	cookie = dmaengine_submit(desc);
 412
 413	return dma_submit_error(cookie);
 414}
 415
 416static void spi_qup_dma_terminate(struct spi_master *master,
 417				  struct spi_transfer *xfer)
 418{
 419	if (xfer->tx_buf)
 420		dmaengine_terminate_all(master->dma_tx);
 421	if (xfer->rx_buf)
 422		dmaengine_terminate_all(master->dma_rx);
 423}
 424
 425static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
 426				     u32 *nents)
 427{
 428	struct scatterlist *sg;
 429	u32 total = 0;
 430
 431	for (sg = sgl; sg; sg = sg_next(sg)) {
 432		unsigned int len = sg_dma_len(sg);
 433
 434		/* check for overflow as well as limit */
 435		if (((total + len) < total) || ((total + len) > max))
 436			break;
 437
 438		total += len;
 439		(*nents)++;
 440	}
 441
 442	return total;
 443}
 444
 445static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
 446			  unsigned long timeout)
 447{
 448	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 449	struct spi_master *master = spi->master;
 450	struct spi_qup *qup = spi_master_get_devdata(master);
 451	struct scatterlist *tx_sgl, *rx_sgl;
 452	int ret;
 453
 454	if (xfer->rx_buf)
 455		rx_done = spi_qup_dma_done;
 456	else if (xfer->tx_buf)
 457		tx_done = spi_qup_dma_done;
 458
 459	rx_sgl = xfer->rx_sg.sgl;
 460	tx_sgl = xfer->tx_sg.sgl;
 461
 462	do {
 463		u32 rx_nents = 0, tx_nents = 0;
 464
 465		if (rx_sgl)
 466			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
 467					SPI_MAX_XFER, &rx_nents) / qup->w_size;
 468		if (tx_sgl)
 469			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
 470					SPI_MAX_XFER, &tx_nents) / qup->w_size;
 471		if (!qup->n_words)
 472			return -EIO;
 473
 474		ret = spi_qup_io_config(spi, xfer);
 475		if (ret)
 476			return ret;
 477
 478		/* before issuing the descriptors, set the QUP to run */
 479		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 480		if (ret) {
 481			dev_warn(qup->dev, "cannot set RUN state\n");
 482			return ret;
 483		}
 484		if (rx_sgl) {
 485			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
 486					      DMA_DEV_TO_MEM, rx_done);
 487			if (ret)
 488				return ret;
 489			dma_async_issue_pending(master->dma_rx);
 490		}
 491
 492		if (tx_sgl) {
 493			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
 494					      DMA_MEM_TO_DEV, tx_done);
 495			if (ret)
 496				return ret;
 497
 498			dma_async_issue_pending(master->dma_tx);
 499		}
 500
 501		if (!wait_for_completion_timeout(&qup->done, timeout))
 502			return -ETIMEDOUT;
 503
 504		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
 505			;
 506		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
 507			;
 508
 509	} while (rx_sgl || tx_sgl);
 510
 511	return 0;
 512}
 513
 514static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
 515			  unsigned long timeout)
 516{
 517	struct spi_master *master = spi->master;
 518	struct spi_qup *qup = spi_master_get_devdata(master);
 519	int ret, n_words, iterations, offset = 0;
 520
 521	n_words = qup->n_words;
 522	iterations = n_words / SPI_MAX_XFER; /* round down */
 523	qup->rx_buf = xfer->rx_buf;
 524	qup->tx_buf = xfer->tx_buf;
 525
 526	do {
 527		if (iterations)
 528			qup->n_words = SPI_MAX_XFER;
 529		else
 530			qup->n_words = n_words % SPI_MAX_XFER;
 531
 532		if (qup->tx_buf && offset)
 533			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;
 534
 535		if (qup->rx_buf && offset)
 536			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;
 537
 538		/*
 539		 * if the transaction is small enough, we need
 540		 * to fallback to FIFO mode
 541		 */
 542		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
 543			qup->mode = QUP_IO_M_MODE_FIFO;
 544
 545		ret = spi_qup_io_config(spi, xfer);
 546		if (ret)
 547			return ret;
 548
 549		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 550		if (ret) {
 551			dev_warn(qup->dev, "cannot set RUN state\n");
 552			return ret;
 553		}
 554
 555		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
 556		if (ret) {
 557			dev_warn(qup->dev, "cannot set PAUSE state\n");
 558			return ret;
 559		}
 560
 561		if (qup->mode == QUP_IO_M_MODE_FIFO)
 562			spi_qup_write(qup);
 563
 564		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 565		if (ret) {
 566			dev_warn(qup->dev, "cannot set RUN state\n");
 567			return ret;
 568		}
 569
 570		if (!wait_for_completion_timeout(&qup->done, timeout))
 571			return -ETIMEDOUT;
 572
 573		offset++;
 574	} while (iterations--);
 575
 576	return 0;
 577}
 578
 579static bool spi_qup_data_pending(struct spi_qup *controller)
 580{
 581	unsigned int remainder_tx, remainder_rx;
 582
 583	remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
 584				    controller->tx_bytes, controller->w_size);
 585
 586	remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
 587				    controller->rx_bytes, controller->w_size);
 588
 589	return remainder_tx || remainder_rx;
 590}
 591
 592static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 593{
 594	struct spi_qup *controller = dev_id;
 595	u32 opflags, qup_err, spi_err;
 596	int error = 0;
 597
 598	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
 599	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
 600	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
 601
 602	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
 603	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
 604
 605	if (qup_err) {
 606		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
 607			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
 608		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
 609			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
 610		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
 611			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
 612		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
 613			dev_warn(controller->dev, "INPUT_OVER_RUN\n");
 614
 615		error = -EIO;
 616	}
 617
 618	if (spi_err) {
 619		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
 620			dev_warn(controller->dev, "CLK_OVER_RUN\n");
 621		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
 622			dev_warn(controller->dev, "CLK_UNDER_RUN\n");
 623
 624		error = -EIO;
 625	}
 626
 627	spin_lock(&controller->lock);
 628	if (!controller->error)
 629		controller->error = error;
 630	spin_unlock(&controller->lock);
 631
 632	if (spi_qup_is_dma_xfer(controller->mode)) {
 633		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
 634	} else {
 635		if (opflags & QUP_OP_IN_SERVICE_FLAG)
 636			spi_qup_read(controller, &opflags);
 637
 638		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
 639			spi_qup_write(controller);
 640
 641		if (!spi_qup_data_pending(controller))
 642			complete(&controller->done);
 643	}
 644
 645	if (error)
 646		complete(&controller->done);
 647
 648	if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
 649		if (!spi_qup_is_dma_xfer(controller->mode)) {
 650			if (spi_qup_data_pending(controller))
 651				return IRQ_HANDLED;
 652		}
 653		complete(&controller->done);
 654	}
 655
 656	return IRQ_HANDLED;
 657}
 658
 659/* set clock freq ... bits per word, determine mode */
 660static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
 661{
 662	struct spi_qup *controller = spi_master_get_devdata(spi->master);
 663	int ret;
 664
 665	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
 666		dev_err(controller->dev, "too big size for loopback %d > %d\n",
 667			xfer->len, controller->in_fifo_sz);
 668		return -EIO;
 669	}
 670
 671	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
 672	if (ret) {
 673		dev_err(controller->dev, "fail to set frequency %d",
 674			xfer->speed_hz);
 675		return -EIO;
 676	}
 677
 678	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
 679	controller->n_words = xfer->len / controller->w_size;
 680
 681	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
 682		controller->mode = QUP_IO_M_MODE_FIFO;
 683	else if (spi->master->can_dma &&
 684		 spi->master->can_dma(spi->master, spi, xfer) &&
 685		 spi->master->cur_msg_mapped)
 686		controller->mode = QUP_IO_M_MODE_BAM;
 687	else
 688		controller->mode = QUP_IO_M_MODE_BLOCK;
 689
 690	return 0;
 691}
 692
 693/* prep qup for another spi transaction of specific type */
 694static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 695{
 696	struct spi_qup *controller = spi_master_get_devdata(spi->master);
 697	u32 config, iomode, control;
 698	unsigned long flags;
 699
 700	spin_lock_irqsave(&controller->lock, flags);
 701	controller->xfer     = xfer;
 702	controller->error    = 0;
 703	controller->rx_bytes = 0;
 704	controller->tx_bytes = 0;
 705	spin_unlock_irqrestore(&controller->lock, flags);
 706
 707
 708	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
 709		dev_err(controller->dev, "cannot set RESET state\n");
 710		return -EIO;
 711	}
 712
 713	switch (controller->mode) {
 714	case QUP_IO_M_MODE_FIFO:
 715		writel_relaxed(controller->n_words,
 716			       controller->base + QUP_MX_READ_CNT);
 717		writel_relaxed(controller->n_words,
 718			       controller->base + QUP_MX_WRITE_CNT);
 719		/* must be zero for FIFO */
 720		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
 721		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
 722		break;
 723	case QUP_IO_M_MODE_BAM:
 724		writel_relaxed(controller->n_words,
 725			       controller->base + QUP_MX_INPUT_CNT);
 726		writel_relaxed(controller->n_words,
 727			       controller->base + QUP_MX_OUTPUT_CNT);
 728		/* must be zero for BLOCK and BAM */
 729		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 730		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
 731
 732		if (!controller->qup_v1) {
 733			void __iomem *input_cnt;
 734
 735			input_cnt = controller->base + QUP_MX_INPUT_CNT;
 736			/*
 737			 * for DMA transfers, both QUP_MX_INPUT_CNT and
 738			 * QUP_MX_OUTPUT_CNT must be zero to all cases but one.
 739			 * That case is a non-balanced transfer when there is
 740			 * only a rx_buf.
 741			 */
 742			if (xfer->tx_buf)
 743				writel_relaxed(0, input_cnt);
 744			else
 745				writel_relaxed(controller->n_words, input_cnt);
 746
 747			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
 748		}
 749		break;
 750	case QUP_IO_M_MODE_BLOCK:
 751		reinit_completion(&controller->done);
 752		writel_relaxed(controller->n_words,
 753			       controller->base + QUP_MX_INPUT_CNT);
 754		writel_relaxed(controller->n_words,
 755			       controller->base + QUP_MX_OUTPUT_CNT);
 756		/* must be zero for BLOCK and BAM */
 757		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 758		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
 759		break;
 760	default:
 761		dev_err(controller->dev, "unknown mode = %d\n",
 762				controller->mode);
 763		return -EIO;
 764	}
 765
 766	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
 767	/* Set input and output transfer mode */
 768	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
 769
 770	if (!spi_qup_is_dma_xfer(controller->mode))
 771		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
 772	else
 773		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
 774
 775	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
 776	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
 777
 778	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
 779
 780	control = readl_relaxed(controller->base + SPI_IO_CONTROL);
 781
 782	if (spi->mode & SPI_CPOL)
 783		control |= SPI_IO_C_CLK_IDLE_HIGH;
 784	else
 785		control &= ~SPI_IO_C_CLK_IDLE_HIGH;
 786
 787	writel_relaxed(control, controller->base + SPI_IO_CONTROL);
 788
 789	config = readl_relaxed(controller->base + SPI_CONFIG);
 790
 791	if (spi->mode & SPI_LOOP)
 792		config |= SPI_CONFIG_LOOPBACK;
 793	else
 794		config &= ~SPI_CONFIG_LOOPBACK;
 795
 796	if (spi->mode & SPI_CPHA)
 797		config &= ~SPI_CONFIG_INPUT_FIRST;
 798	else
 799		config |= SPI_CONFIG_INPUT_FIRST;
 800
 801	/*
 802	 * HS_MODE improves signal stability for spi-clk high rates,
 803	 * but is invalid in loop back mode.
 804	 */
 805	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
 806		config |= SPI_CONFIG_HS_MODE;
 807	else
 808		config &= ~SPI_CONFIG_HS_MODE;
 809
 810	writel_relaxed(config, controller->base + SPI_CONFIG);
 811
 812	config = readl_relaxed(controller->base + QUP_CONFIG);
 813	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
 814	config |= xfer->bits_per_word - 1;
 815	config |= QUP_CONFIG_SPI_MODE;
 816
 817	if (spi_qup_is_dma_xfer(controller->mode)) {
 818		if (!xfer->tx_buf)
 819			config |= QUP_CONFIG_NO_OUTPUT;
 820		if (!xfer->rx_buf)
 821			config |= QUP_CONFIG_NO_INPUT;
 822	}
 823
 824	writel_relaxed(config, controller->base + QUP_CONFIG);
 825
 826	/* only write to OPERATIONAL_MASK when register is present */
 827	if (!controller->qup_v1) {
 828		u32 mask = 0;
 829
 830		/*
 831		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
 832		 * status change in BAM mode
 833		 */
 834
 835		if (spi_qup_is_dma_xfer(controller->mode))
 836			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
 837
 838		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
 839	}
 840
 841	return 0;
 842}
 843
 844static int spi_qup_transfer_one(struct spi_master *master,
 845			      struct spi_device *spi,
 846			      struct spi_transfer *xfer)
 847{
 848	struct spi_qup *controller = spi_master_get_devdata(master);
 849	unsigned long timeout, flags;
 850	int ret;
 851
 852	ret = spi_qup_io_prep(spi, xfer);
 853	if (ret)
 854		return ret;
 855
 856	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
 857	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
 858				     xfer->len) * 8, timeout);
 859	timeout = 100 * msecs_to_jiffies(timeout);
 860
 861	reinit_completion(&controller->done);
 862
 863	spin_lock_irqsave(&controller->lock, flags);
 864	controller->xfer     = xfer;
 865	controller->error    = 0;
 866	controller->rx_bytes = 0;
 867	controller->tx_bytes = 0;
 868	spin_unlock_irqrestore(&controller->lock, flags);
 869
 870	if (spi_qup_is_dma_xfer(controller->mode))
 871		ret = spi_qup_do_dma(spi, xfer, timeout);
 872	else
 873		ret = spi_qup_do_pio(spi, xfer, timeout);
 874
 
 
 
 
 875	spi_qup_set_state(controller, QUP_STATE_RESET);
 876	spin_lock_irqsave(&controller->lock, flags);
 877	if (!ret)
 878		ret = controller->error;
 879	spin_unlock_irqrestore(&controller->lock, flags);
 880
 881	if (ret && spi_qup_is_dma_xfer(controller->mode))
 882		spi_qup_dma_terminate(master, xfer);
 883
 884	return ret;
 885}
 886
 887static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
 888			    struct spi_transfer *xfer)
 889{
 890	struct spi_qup *qup = spi_master_get_devdata(master);
 891	size_t dma_align = dma_get_cache_alignment();
 892	int n_words;
 893
 894	if (xfer->rx_buf) {
 895		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
 896		    IS_ERR_OR_NULL(master->dma_rx))
 897			return false;
 898		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
 899			return false;
 900	}
 901
 902	if (xfer->tx_buf) {
 903		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
 904		    IS_ERR_OR_NULL(master->dma_tx))
 905			return false;
 906		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
 907			return false;
 908	}
 909
 910	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
 911	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
 912		return false;
 913
 914	return true;
 915}
 916
 917static void spi_qup_release_dma(struct spi_master *master)
 918{
 919	if (!IS_ERR_OR_NULL(master->dma_rx))
 920		dma_release_channel(master->dma_rx);
 921	if (!IS_ERR_OR_NULL(master->dma_tx))
 922		dma_release_channel(master->dma_tx);
 923}
 924
 925static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
 926{
 927	struct spi_qup *spi = spi_master_get_devdata(master);
 928	struct dma_slave_config *rx_conf = &spi->rx_conf,
 929				*tx_conf = &spi->tx_conf;
 930	struct device *dev = spi->dev;
 931	int ret;
 932
 933	/* allocate dma resources, if available */
 934	master->dma_rx = dma_request_chan(dev, "rx");
 935	if (IS_ERR(master->dma_rx))
 936		return PTR_ERR(master->dma_rx);
 937
 938	master->dma_tx = dma_request_chan(dev, "tx");
 939	if (IS_ERR(master->dma_tx)) {
 940		ret = PTR_ERR(master->dma_tx);
 941		goto err_tx;
 942	}
 943
 944	/* set DMA parameters */
 945	rx_conf->direction = DMA_DEV_TO_MEM;
 946	rx_conf->device_fc = 1;
 947	rx_conf->src_addr = base + QUP_INPUT_FIFO;
 948	rx_conf->src_maxburst = spi->in_blk_sz;
 949
 950	tx_conf->direction = DMA_MEM_TO_DEV;
 951	tx_conf->device_fc = 1;
 952	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
 953	tx_conf->dst_maxburst = spi->out_blk_sz;
 954
 955	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
 956	if (ret) {
 957		dev_err(dev, "failed to configure RX channel\n");
 958		goto err;
 959	}
 960
 961	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
 962	if (ret) {
 963		dev_err(dev, "failed to configure TX channel\n");
 964		goto err;
 965	}
 966
 967	return 0;
 968
 969err:
 970	dma_release_channel(master->dma_tx);
 971err_tx:
 972	dma_release_channel(master->dma_rx);
 973	return ret;
 974}
 975
 976static void spi_qup_set_cs(struct spi_device *spi, bool val)
 977{
 978	struct spi_qup *controller;
 979	u32 spi_ioc;
 980	u32 spi_ioc_orig;
 981
 982	controller = spi_master_get_devdata(spi->master);
 983	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
 984	spi_ioc_orig = spi_ioc;
 985	if (!val)
 986		spi_ioc |= SPI_IO_C_FORCE_CS;
 987	else
 988		spi_ioc &= ~SPI_IO_C_FORCE_CS;
 989
 990	if (spi_ioc != spi_ioc_orig)
 991		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
 992}
 993
 994static int spi_qup_probe(struct platform_device *pdev)
 995{
 996	struct spi_master *master;
 997	struct clk *iclk, *cclk;
 998	struct spi_qup *controller;
 999	struct resource *res;
1000	struct device *dev;
1001	void __iomem *base;
1002	u32 max_freq, iomode, num_cs;
1003	int ret, irq, size;
1004
1005	dev = &pdev->dev;
1006	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1007	base = devm_ioremap_resource(dev, res);
1008	if (IS_ERR(base))
1009		return PTR_ERR(base);
1010
1011	irq = platform_get_irq(pdev, 0);
1012	if (irq < 0)
1013		return irq;
1014
1015	cclk = devm_clk_get(dev, "core");
1016	if (IS_ERR(cclk))
1017		return PTR_ERR(cclk);
1018
1019	iclk = devm_clk_get(dev, "iface");
1020	if (IS_ERR(iclk))
1021		return PTR_ERR(iclk);
1022
1023	/* This is optional parameter */
1024	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
1025		max_freq = SPI_MAX_RATE;
1026
1027	if (!max_freq || max_freq > SPI_MAX_RATE) {
1028		dev_err(dev, "invalid clock frequency %d\n", max_freq);
1029		return -ENXIO;
1030	}
1031
1032	ret = clk_prepare_enable(cclk);
1033	if (ret) {
1034		dev_err(dev, "cannot enable core clock\n");
1035		return ret;
1036	}
1037
1038	ret = clk_prepare_enable(iclk);
1039	if (ret) {
1040		clk_disable_unprepare(cclk);
1041		dev_err(dev, "cannot enable iface clock\n");
1042		return ret;
1043	}
1044
1045	master = spi_alloc_master(dev, sizeof(struct spi_qup));
1046	if (!master) {
1047		clk_disable_unprepare(cclk);
1048		clk_disable_unprepare(iclk);
1049		dev_err(dev, "cannot allocate master\n");
1050		return -ENOMEM;
1051	}
1052
1053	/* use num-cs unless not present or out of range */
1054	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
1055	    num_cs > SPI_NUM_CHIPSELECTS)
1056		master->num_chipselect = SPI_NUM_CHIPSELECTS;
1057	else
1058		master->num_chipselect = num_cs;
1059
1060	master->use_gpio_descriptors = true;
1061	master->max_native_cs = SPI_NUM_CHIPSELECTS;
1062	master->bus_num = pdev->id;
1063	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
1064	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1065	master->max_speed_hz = max_freq;
1066	master->transfer_one = spi_qup_transfer_one;
1067	master->dev.of_node = pdev->dev.of_node;
1068	master->auto_runtime_pm = true;
1069	master->dma_alignment = dma_get_cache_alignment();
1070	master->max_dma_len = SPI_MAX_XFER;
1071
1072	platform_set_drvdata(pdev, master);
1073
1074	controller = spi_master_get_devdata(master);
1075
1076	controller->dev = dev;
1077	controller->base = base;
1078	controller->iclk = iclk;
1079	controller->cclk = cclk;
1080	controller->irq = irq;
1081
1082	ret = spi_qup_init_dma(master, res->start);
1083	if (ret == -EPROBE_DEFER)
1084		goto error;
1085	else if (!ret)
1086		master->can_dma = spi_qup_can_dma;
1087
1088	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);
1089
1090	if (!controller->qup_v1)
1091		master->set_cs = spi_qup_set_cs;
1092
1093	spin_lock_init(&controller->lock);
1094	init_completion(&controller->done);
1095
1096	iomode = readl_relaxed(base + QUP_IO_M_MODES);
1097
1098	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
1099	if (size)
1100		controller->out_blk_sz = size * 16;
1101	else
1102		controller->out_blk_sz = 4;
1103
1104	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
1105	if (size)
1106		controller->in_blk_sz = size * 16;
1107	else
1108		controller->in_blk_sz = 4;
1109
1110	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
1111	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
1112
1113	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
1114	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
1115
1116	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
1117		 controller->in_blk_sz, controller->in_fifo_sz,
1118		 controller->out_blk_sz, controller->out_fifo_sz);
1119
1120	writel_relaxed(1, base + QUP_SW_RESET);
1121
1122	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1123	if (ret) {
1124		dev_err(dev, "cannot set RESET state\n");
1125		goto error_dma;
1126	}
1127
1128	writel_relaxed(0, base + QUP_OPERATIONAL);
1129	writel_relaxed(0, base + QUP_IO_M_MODES);
1130
1131	if (!controller->qup_v1)
1132		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
1133
1134	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
1135		       base + SPI_ERROR_FLAGS_EN);
1136
1137	/* if earlier version of the QUP, disable INPUT_OVERRUN */
1138	if (controller->qup_v1)
1139		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
1140			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
1141			base + QUP_ERROR_FLAGS_EN);
1142
1143	writel_relaxed(0, base + SPI_CONFIG);
1144	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
1145
1146	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
1147			       IRQF_TRIGGER_HIGH, pdev->name, controller);
1148	if (ret)
1149		goto error_dma;
1150
1151	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
1152	pm_runtime_use_autosuspend(dev);
1153	pm_runtime_set_active(dev);
1154	pm_runtime_enable(dev);
1155
1156	ret = devm_spi_register_master(dev, master);
1157	if (ret)
1158		goto disable_pm;
1159
1160	return 0;
1161
1162disable_pm:
1163	pm_runtime_disable(&pdev->dev);
1164error_dma:
1165	spi_qup_release_dma(master);
1166error:
1167	clk_disable_unprepare(cclk);
1168	clk_disable_unprepare(iclk);
1169	spi_master_put(master);
1170	return ret;
1171}
1172
1173#ifdef CONFIG_PM
1174static int spi_qup_pm_suspend_runtime(struct device *device)
1175{
1176	struct spi_master *master = dev_get_drvdata(device);
1177	struct spi_qup *controller = spi_master_get_devdata(master);
1178	u32 config;
1179
1180	/* Enable clocks auto gaiting */
1181	config = readl(controller->base + QUP_CONFIG);
1182	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
1183	writel_relaxed(config, controller->base + QUP_CONFIG);
1184
1185	clk_disable_unprepare(controller->cclk);
1186	clk_disable_unprepare(controller->iclk);
1187
1188	return 0;
1189}
1190
1191static int spi_qup_pm_resume_runtime(struct device *device)
1192{
1193	struct spi_master *master = dev_get_drvdata(device);
1194	struct spi_qup *controller = spi_master_get_devdata(master);
1195	u32 config;
1196	int ret;
1197
1198	ret = clk_prepare_enable(controller->iclk);
1199	if (ret)
1200		return ret;
1201
1202	ret = clk_prepare_enable(controller->cclk);
1203	if (ret) {
1204		clk_disable_unprepare(controller->iclk);
1205		return ret;
1206	}
1207
1208	/* Disable clocks auto gaiting */
1209	config = readl_relaxed(controller->base + QUP_CONFIG);
1210	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
1211	writel_relaxed(config, controller->base + QUP_CONFIG);
1212	return 0;
1213}
1214#endif /* CONFIG_PM */
1215
1216#ifdef CONFIG_PM_SLEEP
1217static int spi_qup_suspend(struct device *device)
1218{
1219	struct spi_master *master = dev_get_drvdata(device);
1220	struct spi_qup *controller = spi_master_get_devdata(master);
1221	int ret;
1222
1223	if (pm_runtime_suspended(device)) {
1224		ret = spi_qup_pm_resume_runtime(device);
1225		if (ret)
1226			return ret;
1227	}
1228	ret = spi_master_suspend(master);
1229	if (ret)
1230		return ret;
1231
1232	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1233	if (ret)
1234		return ret;
1235
1236	clk_disable_unprepare(controller->cclk);
1237	clk_disable_unprepare(controller->iclk);
 
 
1238	return 0;
1239}
1240
1241static int spi_qup_resume(struct device *device)
1242{
1243	struct spi_master *master = dev_get_drvdata(device);
1244	struct spi_qup *controller = spi_master_get_devdata(master);
1245	int ret;
1246
1247	ret = clk_prepare_enable(controller->iclk);
1248	if (ret)
1249		return ret;
1250
1251	ret = clk_prepare_enable(controller->cclk);
1252	if (ret) {
1253		clk_disable_unprepare(controller->iclk);
1254		return ret;
1255	}
1256
1257	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1258	if (ret)
1259		goto disable_clk;
1260
1261	ret = spi_master_resume(master);
1262	if (ret)
1263		goto disable_clk;
1264
1265	return 0;
1266
1267disable_clk:
1268	clk_disable_unprepare(controller->cclk);
1269	clk_disable_unprepare(controller->iclk);
1270	return ret;
1271}
1272#endif /* CONFIG_PM_SLEEP */
1273
1274static int spi_qup_remove(struct platform_device *pdev)
1275{
1276	struct spi_master *master = dev_get_drvdata(&pdev->dev);
1277	struct spi_qup *controller = spi_master_get_devdata(master);
1278	int ret;
1279
1280	ret = pm_runtime_resume_and_get(&pdev->dev);
1281	if (ret < 0)
1282		return ret;
1283
1284	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1285	if (ret)
1286		return ret;
1287
1288	spi_qup_release_dma(master);
1289
1290	clk_disable_unprepare(controller->cclk);
1291	clk_disable_unprepare(controller->iclk);
1292
1293	pm_runtime_put_noidle(&pdev->dev);
1294	pm_runtime_disable(&pdev->dev);
1295
1296	return 0;
1297}
1298
1299static const struct of_device_id spi_qup_dt_match[] = {
1300	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
1301	{ .compatible = "qcom,spi-qup-v2.1.1", },
1302	{ .compatible = "qcom,spi-qup-v2.2.1", },
1303	{ }
1304};
1305MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
1306
1307static const struct dev_pm_ops spi_qup_dev_pm_ops = {
1308	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
1309	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
1310			   spi_qup_pm_resume_runtime,
1311			   NULL)
1312};
1313
1314static struct platform_driver spi_qup_driver = {
1315	.driver = {
1316		.name		= "spi_qup",
1317		.pm		= &spi_qup_dev_pm_ops,
1318		.of_match_table = spi_qup_dt_match,
1319	},
1320	.probe = spi_qup_probe,
1321	.remove = spi_qup_remove,
1322};
1323module_platform_driver(spi_qup_driver);
1324
1325MODULE_LICENSE("GPL v2");
1326MODULE_ALIAS("platform:spi_qup");
v4.17
 
   1/*
   2 * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License rev 2 and
   6 * only rev 2 as published by the free Software foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or fITNESS fOR A PARTICULAR PURPOSE.  See the
  11 * GNU General Public License for more details.
  12 */
  13
  14#include <linux/clk.h>
  15#include <linux/delay.h>
  16#include <linux/err.h>
  17#include <linux/interrupt.h>
  18#include <linux/io.h>
  19#include <linux/list.h>
  20#include <linux/module.h>
  21#include <linux/of.h>
  22#include <linux/of_device.h>
  23#include <linux/platform_device.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/spi/spi.h>
  26#include <linux/dmaengine.h>
  27#include <linux/dma-mapping.h>
  28
  29#define QUP_CONFIG			0x0000
  30#define QUP_STATE			0x0004
  31#define QUP_IO_M_MODES			0x0008
  32#define QUP_SW_RESET			0x000c
  33#define QUP_OPERATIONAL			0x0018
  34#define QUP_ERROR_FLAGS			0x001c
  35#define QUP_ERROR_FLAGS_EN		0x0020
  36#define QUP_OPERATIONAL_MASK		0x0028
  37#define QUP_HW_VERSION			0x0030
  38#define QUP_MX_OUTPUT_CNT		0x0100
  39#define QUP_OUTPUT_FIFO			0x0110
  40#define QUP_MX_WRITE_CNT		0x0150
  41#define QUP_MX_INPUT_CNT		0x0200
  42#define QUP_MX_READ_CNT			0x0208
  43#define QUP_INPUT_FIFO			0x0218
  44
  45#define SPI_CONFIG			0x0300
  46#define SPI_IO_CONTROL			0x0304
  47#define SPI_ERROR_FLAGS			0x0308
  48#define SPI_ERROR_FLAGS_EN		0x030c
  49
  50/* QUP_CONFIG fields */
  51#define QUP_CONFIG_SPI_MODE		(1 << 8)
  52#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
  53#define QUP_CONFIG_NO_INPUT		BIT(7)
  54#define QUP_CONFIG_NO_OUTPUT		BIT(6)
  55#define QUP_CONFIG_N			0x001f
  56
  57/* QUP_STATE fields */
  58#define QUP_STATE_VALID			BIT(2)
  59#define QUP_STATE_RESET			0
  60#define QUP_STATE_RUN			1
  61#define QUP_STATE_PAUSE			3
  62#define QUP_STATE_MASK			3
  63#define QUP_STATE_CLEAR			2
  64
  65#define QUP_HW_VERSION_2_1_1		0x20010001
  66
  67/* QUP_IO_M_MODES fields */
  68#define QUP_IO_M_PACK_EN		BIT(15)
  69#define QUP_IO_M_UNPACK_EN		BIT(14)
  70#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
  71#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
  72#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
  73#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
  74
  75#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
  76#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
  77#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
  78#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)
  79
  80#define QUP_IO_M_MODE_FIFO		0
  81#define QUP_IO_M_MODE_BLOCK		1
  82#define QUP_IO_M_MODE_DMOV		2
  83#define QUP_IO_M_MODE_BAM		3
  84
  85/* QUP_OPERATIONAL fields */
  86#define QUP_OP_IN_BLOCK_READ_REQ	BIT(13)
  87#define QUP_OP_OUT_BLOCK_WRITE_REQ	BIT(12)
  88#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
  89#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
  90#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
  91#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
  92#define QUP_OP_IN_FIFO_FULL		BIT(7)
  93#define QUP_OP_OUT_FIFO_FULL		BIT(6)
  94#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
  95#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)
  96
  97/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
  98#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
  99#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
 100#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
 101#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)
 102
 103/* SPI_CONFIG fields */
 104#define SPI_CONFIG_HS_MODE		BIT(10)
 105#define SPI_CONFIG_INPUT_FIRST		BIT(9)
 106#define SPI_CONFIG_LOOPBACK		BIT(8)
 107
 108/* SPI_IO_CONTROL fields */
 109#define SPI_IO_C_FORCE_CS		BIT(11)
 110#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
 111#define SPI_IO_C_MX_CS_MODE		BIT(8)
 112#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
 113#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
 114#define SPI_IO_C_CS_SELECT_MASK		0x000c
 115#define SPI_IO_C_TRISTATE_CS		BIT(1)
 116#define SPI_IO_C_NO_TRI_STATE		BIT(0)
 117
 118/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
 119#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
 120#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)
 121
 122#define SPI_NUM_CHIPSELECTS		4
 123
 124#define SPI_MAX_XFER			(SZ_64K - 64)
 125
 126/* high speed mode is when bus rate is greater then 26MHz */
 127#define SPI_HS_MIN_RATE			26000000
 128#define SPI_MAX_RATE			50000000
 129
 130#define SPI_DELAY_THRESHOLD		1
 131#define SPI_DELAY_RETRY			10
 132
 133struct spi_qup {
 134	void __iomem		*base;
 135	struct device		*dev;
 136	struct clk		*cclk;	/* core clock */
 137	struct clk		*iclk;	/* interface clock */
 138	int			irq;
 139	spinlock_t		lock;
 140
 141	int			in_fifo_sz;
 142	int			out_fifo_sz;
 143	int			in_blk_sz;
 144	int			out_blk_sz;
 145
 146	struct spi_transfer	*xfer;
 147	struct completion	done;
 148	int			error;
 149	int			w_size;	/* bytes per SPI word */
 150	int			n_words;
 151	int			tx_bytes;
 152	int			rx_bytes;
 153	const u8		*tx_buf;
 154	u8			*rx_buf;
 155	int			qup_v1;
 156
 157	int			mode;
 158	struct dma_slave_config	rx_conf;
 159	struct dma_slave_config	tx_conf;
 160};
 161
 162static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
 163
 164static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
 165{
 166	u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
 167
 168	return (opflag & flag) != 0;
 169}
 170
 171static inline bool spi_qup_is_dma_xfer(int mode)
 172{
 173	if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
 174		return true;
 175
 176	return false;
 177}
 178
 179/* get's the transaction size length */
 180static inline unsigned int spi_qup_len(struct spi_qup *controller)
 181{
 182	return controller->n_words * controller->w_size;
 183}
 184
 185static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
 186{
 187	u32 opstate = readl_relaxed(controller->base + QUP_STATE);
 188
 189	return opstate & QUP_STATE_VALID;
 190}
 191
 192static int spi_qup_set_state(struct spi_qup *controller, u32 state)
 193{
 194	unsigned long loop;
 195	u32 cur_state;
 196
 197	loop = 0;
 198	while (!spi_qup_is_valid_state(controller)) {
 199
 200		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
 201
 202		if (++loop > SPI_DELAY_RETRY)
 203			return -EIO;
 204	}
 205
 206	if (loop)
 207		dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
 208			loop, state);
 209
 210	cur_state = readl_relaxed(controller->base + QUP_STATE);
 211	/*
 212	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
 213	 * of (b10) are required
 214	 */
 215	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
 216	    (state == QUP_STATE_RESET)) {
 217		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
 218		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
 219	} else {
 220		cur_state &= ~QUP_STATE_MASK;
 221		cur_state |= state;
 222		writel_relaxed(cur_state, controller->base + QUP_STATE);
 223	}
 224
 225	loop = 0;
 226	while (!spi_qup_is_valid_state(controller)) {
 227
 228		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
 229
 230		if (++loop > SPI_DELAY_RETRY)
 231			return -EIO;
 232	}
 233
 234	return 0;
 235}
 236
 237static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
 238{
 239	u8 *rx_buf = controller->rx_buf;
 240	int i, shift, num_bytes;
 241	u32 word;
 242
 243	for (; num_words; num_words--) {
 244
 245		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
 246
 247		num_bytes = min_t(int, spi_qup_len(controller) -
 248				       controller->rx_bytes,
 249				       controller->w_size);
 250
 251		if (!rx_buf) {
 252			controller->rx_bytes += num_bytes;
 253			continue;
 254		}
 255
 256		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
 257			/*
 258			 * The data format depends on bytes per SPI word:
 259			 *  4 bytes: 0x12345678
 260			 *  2 bytes: 0x00001234
 261			 *  1 byte : 0x00000012
 262			 */
 263			shift = BITS_PER_BYTE;
 264			shift *= (controller->w_size - i - 1);
 265			rx_buf[controller->rx_bytes] = word >> shift;
 266		}
 267	}
 268}
 269
 270static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
 271{
 272	u32 remainder, words_per_block, num_words;
 273	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
 274
 275	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
 276				 controller->w_size);
 277	words_per_block = controller->in_blk_sz >> 2;
 278
 279	do {
 280		/* ACK by clearing service flag */
 281		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
 282			       controller->base + QUP_OPERATIONAL);
 283
 
 
 
 284		if (is_block_mode) {
 285			num_words = (remainder > words_per_block) ?
 286					words_per_block : remainder;
 287		} else {
 288			if (!spi_qup_is_flag_set(controller,
 289						 QUP_OP_IN_FIFO_NOT_EMPTY))
 290				break;
 291
 292			num_words = 1;
 293		}
 294
 295		/* read up to the maximum transfer size available */
 296		spi_qup_read_from_fifo(controller, num_words);
 297
 298		remainder -= num_words;
 299
 300		/* if block mode, check to see if next block is available */
 301		if (is_block_mode && !spi_qup_is_flag_set(controller,
 302					QUP_OP_IN_BLOCK_READ_REQ))
 303			break;
 304
 305	} while (remainder);
 306
 307	/*
 308	 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
 309	 * reads, it has to be cleared again at the very end.  However, be sure
 310	 * to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
 311	 * present and this is used to determine if transaction is complete
 312	 */
 313	*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
 314	if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
 315		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
 316			       controller->base + QUP_OPERATIONAL);
 317
 
 
 318}
 319
 320static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
 321{
 322	const u8 *tx_buf = controller->tx_buf;
 323	int i, num_bytes;
 324	u32 word, data;
 325
 326	for (; num_words; num_words--) {
 327		word = 0;
 328
 329		num_bytes = min_t(int, spi_qup_len(controller) -
 330				       controller->tx_bytes,
 331				       controller->w_size);
 332		if (tx_buf)
 333			for (i = 0; i < num_bytes; i++) {
 334				data = tx_buf[controller->tx_bytes + i];
 335				word |= data << (BITS_PER_BYTE * (3 - i));
 336			}
 337
 338		controller->tx_bytes += num_bytes;
 339
 340		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
 341	}
 342}
 343
 344static void spi_qup_dma_done(void *data)
 345{
 346	struct spi_qup *qup = data;
 347
 348	complete(&qup->done);
 349}
 350
 351static void spi_qup_write(struct spi_qup *controller)
 352{
 353	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
 354	u32 remainder, words_per_block, num_words;
 355
 356	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
 357				 controller->w_size);
 358	words_per_block = controller->out_blk_sz >> 2;
 359
 360	do {
 361		/* ACK by clearing service flag */
 362		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
 363			       controller->base + QUP_OPERATIONAL);
 364
 
 
 
 
 365		if (is_block_mode) {
 366			num_words = (remainder > words_per_block) ?
 367				words_per_block : remainder;
 368		} else {
 369			if (spi_qup_is_flag_set(controller,
 370						QUP_OP_OUT_FIFO_FULL))
 371				break;
 372
 373			num_words = 1;
 374		}
 375
 376		spi_qup_write_to_fifo(controller, num_words);
 377
 378		remainder -= num_words;
 379
 380		/* if block mode, check to see if next block is available */
 381		if (is_block_mode && !spi_qup_is_flag_set(controller,
 382					QUP_OP_OUT_BLOCK_WRITE_REQ))
 383			break;
 384
 385	} while (remainder);
 386}
 387
 388static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
 389			   unsigned int nents, enum dma_transfer_direction dir,
 390			   dma_async_tx_callback callback)
 391{
 392	struct spi_qup *qup = spi_master_get_devdata(master);
 393	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
 394	struct dma_async_tx_descriptor *desc;
 395	struct dma_chan *chan;
 396	dma_cookie_t cookie;
 397
 398	if (dir == DMA_MEM_TO_DEV)
 399		chan = master->dma_tx;
 400	else
 401		chan = master->dma_rx;
 402
 403	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
 404	if (IS_ERR_OR_NULL(desc))
 405		return desc ? PTR_ERR(desc) : -EINVAL;
 406
 407	desc->callback = callback;
 408	desc->callback_param = qup;
 409
 410	cookie = dmaengine_submit(desc);
 411
 412	return dma_submit_error(cookie);
 413}
 414
 415static void spi_qup_dma_terminate(struct spi_master *master,
 416				  struct spi_transfer *xfer)
 417{
 418	if (xfer->tx_buf)
 419		dmaengine_terminate_all(master->dma_tx);
 420	if (xfer->rx_buf)
 421		dmaengine_terminate_all(master->dma_rx);
 422}
 423
 424static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
 425				     u32 *nents)
 426{
 427	struct scatterlist *sg;
 428	u32 total = 0;
 429
 430	for (sg = sgl; sg; sg = sg_next(sg)) {
 431		unsigned int len = sg_dma_len(sg);
 432
 433		/* check for overflow as well as limit */
 434		if (((total + len) < total) || ((total + len) > max))
 435			break;
 436
 437		total += len;
 438		(*nents)++;
 439	}
 440
 441	return total;
 442}
 443
 444static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
 445			  unsigned long timeout)
 446{
 447	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 448	struct spi_master *master = spi->master;
 449	struct spi_qup *qup = spi_master_get_devdata(master);
 450	struct scatterlist *tx_sgl, *rx_sgl;
 451	int ret;
 452
 453	if (xfer->rx_buf)
 454		rx_done = spi_qup_dma_done;
 455	else if (xfer->tx_buf)
 456		tx_done = spi_qup_dma_done;
 457
 458	rx_sgl = xfer->rx_sg.sgl;
 459	tx_sgl = xfer->tx_sg.sgl;
 460
 461	do {
 462		u32 rx_nents = 0, tx_nents = 0;
 463
 464		if (rx_sgl)
 465			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
 466					SPI_MAX_XFER, &rx_nents) / qup->w_size;
 467		if (tx_sgl)
 468			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
 469					SPI_MAX_XFER, &tx_nents) / qup->w_size;
 470		if (!qup->n_words)
 471			return -EIO;
 472
 473		ret = spi_qup_io_config(spi, xfer);
 474		if (ret)
 475			return ret;
 476
 477		/* before issuing the descriptors, set the QUP to run */
 478		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 479		if (ret) {
 480			dev_warn(qup->dev, "cannot set RUN state\n");
 481			return ret;
 482		}
 483		if (rx_sgl) {
 484			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
 485					      DMA_DEV_TO_MEM, rx_done);
 486			if (ret)
 487				return ret;
 488			dma_async_issue_pending(master->dma_rx);
 489		}
 490
 491		if (tx_sgl) {
 492			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
 493					      DMA_MEM_TO_DEV, tx_done);
 494			if (ret)
 495				return ret;
 496
 497			dma_async_issue_pending(master->dma_tx);
 498		}
 499
 500		if (!wait_for_completion_timeout(&qup->done, timeout))
 501			return -ETIMEDOUT;
 502
 503		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
 504			;
 505		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
 506			;
 507
 508	} while (rx_sgl || tx_sgl);
 509
 510	return 0;
 511}
 512
 513static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
 514			  unsigned long timeout)
 515{
 516	struct spi_master *master = spi->master;
 517	struct spi_qup *qup = spi_master_get_devdata(master);
 518	int ret, n_words, iterations, offset = 0;
 519
 520	n_words = qup->n_words;
 521	iterations = n_words / SPI_MAX_XFER; /* round down */
 522	qup->rx_buf = xfer->rx_buf;
 523	qup->tx_buf = xfer->tx_buf;
 524
 525	do {
 526		if (iterations)
 527			qup->n_words = SPI_MAX_XFER;
 528		else
 529			qup->n_words = n_words % SPI_MAX_XFER;
 530
 531		if (qup->tx_buf && offset)
 532			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;
 533
 534		if (qup->rx_buf && offset)
 535			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;
 536
 537		/*
 538		 * if the transaction is small enough, we need
 539		 * to fallback to FIFO mode
 540		 */
 541		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
 542			qup->mode = QUP_IO_M_MODE_FIFO;
 543
 544		ret = spi_qup_io_config(spi, xfer);
 545		if (ret)
 546			return ret;
 547
 548		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 549		if (ret) {
 550			dev_warn(qup->dev, "cannot set RUN state\n");
 551			return ret;
 552		}
 553
 554		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
 555		if (ret) {
 556			dev_warn(qup->dev, "cannot set PAUSE state\n");
 557			return ret;
 558		}
 559
 560		if (qup->mode == QUP_IO_M_MODE_FIFO)
 561			spi_qup_write(qup);
 562
 563		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 564		if (ret) {
 565			dev_warn(qup->dev, "cannot set RUN state\n");
 566			return ret;
 567		}
 568
 569		if (!wait_for_completion_timeout(&qup->done, timeout))
 570			return -ETIMEDOUT;
 571
 572		offset++;
 573	} while (iterations--);
 574
 575	return 0;
 576}
 577
 
 
 
 
 
 
 
 
 
 
 
 
 
 578static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 579{
 580	struct spi_qup *controller = dev_id;
 581	u32 opflags, qup_err, spi_err;
 582	int error = 0;
 583
 584	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
 585	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
 586	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
 587
 588	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
 589	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
 590
 591	if (qup_err) {
 592		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
 593			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
 594		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
 595			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
 596		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
 597			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
 598		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
 599			dev_warn(controller->dev, "INPUT_OVER_RUN\n");
 600
 601		error = -EIO;
 602	}
 603
 604	if (spi_err) {
 605		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
 606			dev_warn(controller->dev, "CLK_OVER_RUN\n");
 607		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
 608			dev_warn(controller->dev, "CLK_UNDER_RUN\n");
 609
 610		error = -EIO;
 611	}
 612
 
 
 
 
 
 613	if (spi_qup_is_dma_xfer(controller->mode)) {
 614		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
 615	} else {
 616		if (opflags & QUP_OP_IN_SERVICE_FLAG)
 617			spi_qup_read(controller, &opflags);
 618
 619		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
 620			spi_qup_write(controller);
 
 
 
 621	}
 622
 623	if ((opflags & QUP_OP_MAX_INPUT_DONE_FLAG) || error)
 624		complete(&controller->done);
 625
 
 
 
 
 
 
 
 
 626	return IRQ_HANDLED;
 627}
 628
 629/* set clock freq ... bits per word, determine mode */
 630static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
 631{
 632	struct spi_qup *controller = spi_master_get_devdata(spi->master);
 633	int ret;
 634
 635	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
 636		dev_err(controller->dev, "too big size for loopback %d > %d\n",
 637			xfer->len, controller->in_fifo_sz);
 638		return -EIO;
 639	}
 640
 641	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
 642	if (ret) {
 643		dev_err(controller->dev, "fail to set frequency %d",
 644			xfer->speed_hz);
 645		return -EIO;
 646	}
 647
 648	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
 649	controller->n_words = xfer->len / controller->w_size;
 650
 651	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
 652		controller->mode = QUP_IO_M_MODE_FIFO;
 653	else if (spi->master->can_dma &&
 654		 spi->master->can_dma(spi->master, spi, xfer) &&
 655		 spi->master->cur_msg_mapped)
 656		controller->mode = QUP_IO_M_MODE_BAM;
 657	else
 658		controller->mode = QUP_IO_M_MODE_BLOCK;
 659
 660	return 0;
 661}
 662
 663/* prep qup for another spi transaction of specific type */
 664static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 665{
 666	struct spi_qup *controller = spi_master_get_devdata(spi->master);
 667	u32 config, iomode, control;
 668	unsigned long flags;
 669
 670	spin_lock_irqsave(&controller->lock, flags);
 671	controller->xfer     = xfer;
 672	controller->error    = 0;
 673	controller->rx_bytes = 0;
 674	controller->tx_bytes = 0;
 675	spin_unlock_irqrestore(&controller->lock, flags);
 676
 677
 678	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
 679		dev_err(controller->dev, "cannot set RESET state\n");
 680		return -EIO;
 681	}
 682
 683	switch (controller->mode) {
 684	case QUP_IO_M_MODE_FIFO:
 685		writel_relaxed(controller->n_words,
 686			       controller->base + QUP_MX_READ_CNT);
 687		writel_relaxed(controller->n_words,
 688			       controller->base + QUP_MX_WRITE_CNT);
 689		/* must be zero for FIFO */
 690		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
 691		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
 692		break;
 693	case QUP_IO_M_MODE_BAM:
 694		writel_relaxed(controller->n_words,
 695			       controller->base + QUP_MX_INPUT_CNT);
 696		writel_relaxed(controller->n_words,
 697			       controller->base + QUP_MX_OUTPUT_CNT);
 698		/* must be zero for BLOCK and BAM */
 699		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 700		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
 701
 702		if (!controller->qup_v1) {
 703			void __iomem *input_cnt;
 704
 705			input_cnt = controller->base + QUP_MX_INPUT_CNT;
 706			/*
 707			 * for DMA transfers, both QUP_MX_INPUT_CNT and
 708			 * QUP_MX_OUTPUT_CNT must be zero to all cases but one.
 709			 * That case is a non-balanced transfer when there is
 710			 * only a rx_buf.
 711			 */
 712			if (xfer->tx_buf)
 713				writel_relaxed(0, input_cnt);
 714			else
 715				writel_relaxed(controller->n_words, input_cnt);
 716
 717			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
 718		}
 719		break;
 720	case QUP_IO_M_MODE_BLOCK:
 721		reinit_completion(&controller->done);
 722		writel_relaxed(controller->n_words,
 723			       controller->base + QUP_MX_INPUT_CNT);
 724		writel_relaxed(controller->n_words,
 725			       controller->base + QUP_MX_OUTPUT_CNT);
 726		/* must be zero for BLOCK and BAM */
 727		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 728		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
 729		break;
 730	default:
 731		dev_err(controller->dev, "unknown mode = %d\n",
 732				controller->mode);
 733		return -EIO;
 734	}
 735
 736	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
 737	/* Set input and output transfer mode */
 738	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
 739
 740	if (!spi_qup_is_dma_xfer(controller->mode))
 741		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
 742	else
 743		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
 744
 745	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
 746	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
 747
 748	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
 749
 750	control = readl_relaxed(controller->base + SPI_IO_CONTROL);
 751
 752	if (spi->mode & SPI_CPOL)
 753		control |= SPI_IO_C_CLK_IDLE_HIGH;
 754	else
 755		control &= ~SPI_IO_C_CLK_IDLE_HIGH;
 756
 757	writel_relaxed(control, controller->base + SPI_IO_CONTROL);
 758
 759	config = readl_relaxed(controller->base + SPI_CONFIG);
 760
 761	if (spi->mode & SPI_LOOP)
 762		config |= SPI_CONFIG_LOOPBACK;
 763	else
 764		config &= ~SPI_CONFIG_LOOPBACK;
 765
 766	if (spi->mode & SPI_CPHA)
 767		config &= ~SPI_CONFIG_INPUT_FIRST;
 768	else
 769		config |= SPI_CONFIG_INPUT_FIRST;
 770
 771	/*
 772	 * HS_MODE improves signal stability for spi-clk high rates,
 773	 * but is invalid in loop back mode.
 774	 */
 775	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
 776		config |= SPI_CONFIG_HS_MODE;
 777	else
 778		config &= ~SPI_CONFIG_HS_MODE;
 779
 780	writel_relaxed(config, controller->base + SPI_CONFIG);
 781
 782	config = readl_relaxed(controller->base + QUP_CONFIG);
 783	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
 784	config |= xfer->bits_per_word - 1;
 785	config |= QUP_CONFIG_SPI_MODE;
 786
 787	if (spi_qup_is_dma_xfer(controller->mode)) {
 788		if (!xfer->tx_buf)
 789			config |= QUP_CONFIG_NO_OUTPUT;
 790		if (!xfer->rx_buf)
 791			config |= QUP_CONFIG_NO_INPUT;
 792	}
 793
 794	writel_relaxed(config, controller->base + QUP_CONFIG);
 795
 796	/* only write to OPERATIONAL_MASK when register is present */
 797	if (!controller->qup_v1) {
 798		u32 mask = 0;
 799
 800		/*
 801		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
 802		 * status change in BAM mode
 803		 */
 804
 805		if (spi_qup_is_dma_xfer(controller->mode))
 806			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
 807
 808		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
 809	}
 810
 811	return 0;
 812}
 813
 814static int spi_qup_transfer_one(struct spi_master *master,
 815			      struct spi_device *spi,
 816			      struct spi_transfer *xfer)
 817{
 818	struct spi_qup *controller = spi_master_get_devdata(master);
 819	unsigned long timeout, flags;
 820	int ret = -EIO;
 821
 822	ret = spi_qup_io_prep(spi, xfer);
 823	if (ret)
 824		return ret;
 825
 826	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
 827	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
 828				     xfer->len) * 8, timeout);
 829	timeout = 100 * msecs_to_jiffies(timeout);
 830
 831	reinit_completion(&controller->done);
 832
 833	spin_lock_irqsave(&controller->lock, flags);
 834	controller->xfer     = xfer;
 835	controller->error    = 0;
 836	controller->rx_bytes = 0;
 837	controller->tx_bytes = 0;
 838	spin_unlock_irqrestore(&controller->lock, flags);
 839
 840	if (spi_qup_is_dma_xfer(controller->mode))
 841		ret = spi_qup_do_dma(spi, xfer, timeout);
 842	else
 843		ret = spi_qup_do_pio(spi, xfer, timeout);
 844
 845	if (ret)
 846		goto exit;
 847
 848exit:
 849	spi_qup_set_state(controller, QUP_STATE_RESET);
 850	spin_lock_irqsave(&controller->lock, flags);
 851	if (!ret)
 852		ret = controller->error;
 853	spin_unlock_irqrestore(&controller->lock, flags);
 854
 855	if (ret && spi_qup_is_dma_xfer(controller->mode))
 856		spi_qup_dma_terminate(master, xfer);
 857
 858	return ret;
 859}
 860
 861static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
 862			    struct spi_transfer *xfer)
 863{
 864	struct spi_qup *qup = spi_master_get_devdata(master);
 865	size_t dma_align = dma_get_cache_alignment();
 866	int n_words;
 867
 868	if (xfer->rx_buf) {
 869		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
 870		    IS_ERR_OR_NULL(master->dma_rx))
 871			return false;
 872		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
 873			return false;
 874	}
 875
 876	if (xfer->tx_buf) {
 877		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
 878		    IS_ERR_OR_NULL(master->dma_tx))
 879			return false;
 880		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
 881			return false;
 882	}
 883
 884	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
 885	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
 886		return false;
 887
 888	return true;
 889}
 890
 891static void spi_qup_release_dma(struct spi_master *master)
 892{
 893	if (!IS_ERR_OR_NULL(master->dma_rx))
 894		dma_release_channel(master->dma_rx);
 895	if (!IS_ERR_OR_NULL(master->dma_tx))
 896		dma_release_channel(master->dma_tx);
 897}
 898
 899static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
 900{
 901	struct spi_qup *spi = spi_master_get_devdata(master);
 902	struct dma_slave_config *rx_conf = &spi->rx_conf,
 903				*tx_conf = &spi->tx_conf;
 904	struct device *dev = spi->dev;
 905	int ret;
 906
 907	/* allocate dma resources, if available */
 908	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
 909	if (IS_ERR(master->dma_rx))
 910		return PTR_ERR(master->dma_rx);
 911
 912	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
 913	if (IS_ERR(master->dma_tx)) {
 914		ret = PTR_ERR(master->dma_tx);
 915		goto err_tx;
 916	}
 917
 918	/* set DMA parameters */
 919	rx_conf->direction = DMA_DEV_TO_MEM;
 920	rx_conf->device_fc = 1;
 921	rx_conf->src_addr = base + QUP_INPUT_FIFO;
 922	rx_conf->src_maxburst = spi->in_blk_sz;
 923
 924	tx_conf->direction = DMA_MEM_TO_DEV;
 925	tx_conf->device_fc = 1;
 926	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
 927	tx_conf->dst_maxburst = spi->out_blk_sz;
 928
 929	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
 930	if (ret) {
 931		dev_err(dev, "failed to configure RX channel\n");
 932		goto err;
 933	}
 934
 935	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
 936	if (ret) {
 937		dev_err(dev, "failed to configure TX channel\n");
 938		goto err;
 939	}
 940
 941	return 0;
 942
 943err:
 944	dma_release_channel(master->dma_tx);
 945err_tx:
 946	dma_release_channel(master->dma_rx);
 947	return ret;
 948}
 949
 950static void spi_qup_set_cs(struct spi_device *spi, bool val)
 951{
 952	struct spi_qup *controller;
 953	u32 spi_ioc;
 954	u32 spi_ioc_orig;
 955
 956	controller = spi_master_get_devdata(spi->master);
 957	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
 958	spi_ioc_orig = spi_ioc;
 959	if (!val)
 960		spi_ioc |= SPI_IO_C_FORCE_CS;
 961	else
 962		spi_ioc &= ~SPI_IO_C_FORCE_CS;
 963
 964	if (spi_ioc != spi_ioc_orig)
 965		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
 966}
 967
 968static int spi_qup_probe(struct platform_device *pdev)
 969{
 970	struct spi_master *master;
 971	struct clk *iclk, *cclk;
 972	struct spi_qup *controller;
 973	struct resource *res;
 974	struct device *dev;
 975	void __iomem *base;
 976	u32 max_freq, iomode, num_cs;
 977	int ret, irq, size;
 978
 979	dev = &pdev->dev;
 980	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 981	base = devm_ioremap_resource(dev, res);
 982	if (IS_ERR(base))
 983		return PTR_ERR(base);
 984
 985	irq = platform_get_irq(pdev, 0);
 986	if (irq < 0)
 987		return irq;
 988
 989	cclk = devm_clk_get(dev, "core");
 990	if (IS_ERR(cclk))
 991		return PTR_ERR(cclk);
 992
 993	iclk = devm_clk_get(dev, "iface");
 994	if (IS_ERR(iclk))
 995		return PTR_ERR(iclk);
 996
 997	/* This is optional parameter */
 998	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
 999		max_freq = SPI_MAX_RATE;
1000
1001	if (!max_freq || max_freq > SPI_MAX_RATE) {
1002		dev_err(dev, "invalid clock frequency %d\n", max_freq);
1003		return -ENXIO;
1004	}
1005
1006	ret = clk_prepare_enable(cclk);
1007	if (ret) {
1008		dev_err(dev, "cannot enable core clock\n");
1009		return ret;
1010	}
1011
1012	ret = clk_prepare_enable(iclk);
1013	if (ret) {
1014		clk_disable_unprepare(cclk);
1015		dev_err(dev, "cannot enable iface clock\n");
1016		return ret;
1017	}
1018
1019	master = spi_alloc_master(dev, sizeof(struct spi_qup));
1020	if (!master) {
1021		clk_disable_unprepare(cclk);
1022		clk_disable_unprepare(iclk);
1023		dev_err(dev, "cannot allocate master\n");
1024		return -ENOMEM;
1025	}
1026
1027	/* use num-cs unless not present or out of range */
1028	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
1029	    num_cs > SPI_NUM_CHIPSELECTS)
1030		master->num_chipselect = SPI_NUM_CHIPSELECTS;
1031	else
1032		master->num_chipselect = num_cs;
1033
 
 
1034	master->bus_num = pdev->id;
1035	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
1036	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1037	master->max_speed_hz = max_freq;
1038	master->transfer_one = spi_qup_transfer_one;
1039	master->dev.of_node = pdev->dev.of_node;
1040	master->auto_runtime_pm = true;
1041	master->dma_alignment = dma_get_cache_alignment();
1042	master->max_dma_len = SPI_MAX_XFER;
1043
1044	platform_set_drvdata(pdev, master);
1045
1046	controller = spi_master_get_devdata(master);
1047
1048	controller->dev = dev;
1049	controller->base = base;
1050	controller->iclk = iclk;
1051	controller->cclk = cclk;
1052	controller->irq = irq;
1053
1054	ret = spi_qup_init_dma(master, res->start);
1055	if (ret == -EPROBE_DEFER)
1056		goto error;
1057	else if (!ret)
1058		master->can_dma = spi_qup_can_dma;
1059
1060	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);
1061
1062	if (!controller->qup_v1)
1063		master->set_cs = spi_qup_set_cs;
1064
1065	spin_lock_init(&controller->lock);
1066	init_completion(&controller->done);
1067
1068	iomode = readl_relaxed(base + QUP_IO_M_MODES);
1069
1070	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
1071	if (size)
1072		controller->out_blk_sz = size * 16;
1073	else
1074		controller->out_blk_sz = 4;
1075
1076	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
1077	if (size)
1078		controller->in_blk_sz = size * 16;
1079	else
1080		controller->in_blk_sz = 4;
1081
1082	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
1083	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
1084
1085	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
1086	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
1087
1088	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
1089		 controller->in_blk_sz, controller->in_fifo_sz,
1090		 controller->out_blk_sz, controller->out_fifo_sz);
1091
1092	writel_relaxed(1, base + QUP_SW_RESET);
1093
1094	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1095	if (ret) {
1096		dev_err(dev, "cannot set RESET state\n");
1097		goto error_dma;
1098	}
1099
1100	writel_relaxed(0, base + QUP_OPERATIONAL);
1101	writel_relaxed(0, base + QUP_IO_M_MODES);
1102
1103	if (!controller->qup_v1)
1104		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
1105
1106	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
1107		       base + SPI_ERROR_FLAGS_EN);
1108
1109	/* if earlier version of the QUP, disable INPUT_OVERRUN */
1110	if (controller->qup_v1)
1111		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
1112			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
1113			base + QUP_ERROR_FLAGS_EN);
1114
1115	writel_relaxed(0, base + SPI_CONFIG);
1116	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
1117
1118	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
1119			       IRQF_TRIGGER_HIGH, pdev->name, controller);
1120	if (ret)
1121		goto error_dma;
1122
1123	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
1124	pm_runtime_use_autosuspend(dev);
1125	pm_runtime_set_active(dev);
1126	pm_runtime_enable(dev);
1127
1128	ret = devm_spi_register_master(dev, master);
1129	if (ret)
1130		goto disable_pm;
1131
1132	return 0;
1133
1134disable_pm:
1135	pm_runtime_disable(&pdev->dev);
1136error_dma:
1137	spi_qup_release_dma(master);
1138error:
1139	clk_disable_unprepare(cclk);
1140	clk_disable_unprepare(iclk);
1141	spi_master_put(master);
1142	return ret;
1143}
1144
1145#ifdef CONFIG_PM
1146static int spi_qup_pm_suspend_runtime(struct device *device)
1147{
1148	struct spi_master *master = dev_get_drvdata(device);
1149	struct spi_qup *controller = spi_master_get_devdata(master);
1150	u32 config;
1151
1152	/* Enable clocks auto gaiting */
1153	config = readl(controller->base + QUP_CONFIG);
1154	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
1155	writel_relaxed(config, controller->base + QUP_CONFIG);
1156
1157	clk_disable_unprepare(controller->cclk);
1158	clk_disable_unprepare(controller->iclk);
1159
1160	return 0;
1161}
1162
1163static int spi_qup_pm_resume_runtime(struct device *device)
1164{
1165	struct spi_master *master = dev_get_drvdata(device);
1166	struct spi_qup *controller = spi_master_get_devdata(master);
1167	u32 config;
1168	int ret;
1169
1170	ret = clk_prepare_enable(controller->iclk);
1171	if (ret)
1172		return ret;
1173
1174	ret = clk_prepare_enable(controller->cclk);
1175	if (ret)
 
1176		return ret;
 
1177
1178	/* Disable clocks auto gaiting */
1179	config = readl_relaxed(controller->base + QUP_CONFIG);
1180	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
1181	writel_relaxed(config, controller->base + QUP_CONFIG);
1182	return 0;
1183}
1184#endif /* CONFIG_PM */
1185
1186#ifdef CONFIG_PM_SLEEP
1187static int spi_qup_suspend(struct device *device)
1188{
1189	struct spi_master *master = dev_get_drvdata(device);
1190	struct spi_qup *controller = spi_master_get_devdata(master);
1191	int ret;
1192
 
 
 
 
 
1193	ret = spi_master_suspend(master);
1194	if (ret)
1195		return ret;
1196
1197	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1198	if (ret)
1199		return ret;
1200
1201	if (!pm_runtime_suspended(device)) {
1202		clk_disable_unprepare(controller->cclk);
1203		clk_disable_unprepare(controller->iclk);
1204	}
1205	return 0;
1206}
1207
1208static int spi_qup_resume(struct device *device)
1209{
1210	struct spi_master *master = dev_get_drvdata(device);
1211	struct spi_qup *controller = spi_master_get_devdata(master);
1212	int ret;
1213
1214	ret = clk_prepare_enable(controller->iclk);
1215	if (ret)
1216		return ret;
1217
1218	ret = clk_prepare_enable(controller->cclk);
1219	if (ret)
 
1220		return ret;
 
1221
1222	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1223	if (ret)
1224		return ret;
 
 
 
 
 
 
1225
1226	return spi_master_resume(master);
 
 
 
1227}
1228#endif /* CONFIG_PM_SLEEP */
1229
1230static int spi_qup_remove(struct platform_device *pdev)
1231{
1232	struct spi_master *master = dev_get_drvdata(&pdev->dev);
1233	struct spi_qup *controller = spi_master_get_devdata(master);
1234	int ret;
1235
1236	ret = pm_runtime_get_sync(&pdev->dev);
1237	if (ret < 0)
1238		return ret;
1239
1240	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1241	if (ret)
1242		return ret;
1243
1244	spi_qup_release_dma(master);
1245
1246	clk_disable_unprepare(controller->cclk);
1247	clk_disable_unprepare(controller->iclk);
1248
1249	pm_runtime_put_noidle(&pdev->dev);
1250	pm_runtime_disable(&pdev->dev);
1251
1252	return 0;
1253}
1254
1255static const struct of_device_id spi_qup_dt_match[] = {
1256	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
1257	{ .compatible = "qcom,spi-qup-v2.1.1", },
1258	{ .compatible = "qcom,spi-qup-v2.2.1", },
1259	{ }
1260};
1261MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
1262
1263static const struct dev_pm_ops spi_qup_dev_pm_ops = {
1264	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
1265	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
1266			   spi_qup_pm_resume_runtime,
1267			   NULL)
1268};
1269
1270static struct platform_driver spi_qup_driver = {
1271	.driver = {
1272		.name		= "spi_qup",
1273		.pm		= &spi_qup_dev_pm_ops,
1274		.of_match_table = spi_qup_dt_match,
1275	},
1276	.probe = spi_qup_probe,
1277	.remove = spi_qup_remove,
1278};
1279module_platform_driver(spi_qup_driver);
1280
1281MODULE_LICENSE("GPL v2");
1282MODULE_ALIAS("platform:spi_qup");