v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
   4 */
   5
   6#include <linux/clk.h>
   7#include <linux/delay.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/dmaengine.h>
  10#include <linux/err.h>
  11#include <linux/interconnect.h>
  12#include <linux/interrupt.h>
  13#include <linux/io.h>
  14#include <linux/list.h>
  15#include <linux/module.h>
  16#include <linux/of.h>
  17#include <linux/platform_device.h>
  18#include <linux/pm_opp.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/spi/spi.h>
  21#include "internals.h"
  22
  23#define QUP_CONFIG			0x0000
  24#define QUP_STATE			0x0004
  25#define QUP_IO_M_MODES			0x0008
  26#define QUP_SW_RESET			0x000c
  27#define QUP_OPERATIONAL			0x0018
  28#define QUP_ERROR_FLAGS			0x001c
  29#define QUP_ERROR_FLAGS_EN		0x0020
  30#define QUP_OPERATIONAL_MASK		0x0028
  31#define QUP_HW_VERSION			0x0030
  32#define QUP_MX_OUTPUT_CNT		0x0100
  33#define QUP_OUTPUT_FIFO			0x0110
  34#define QUP_MX_WRITE_CNT		0x0150
  35#define QUP_MX_INPUT_CNT		0x0200
  36#define QUP_MX_READ_CNT			0x0208
  37#define QUP_INPUT_FIFO			0x0218
  38
  39#define SPI_CONFIG			0x0300
  40#define SPI_IO_CONTROL			0x0304
  41#define SPI_ERROR_FLAGS			0x0308
  42#define SPI_ERROR_FLAGS_EN		0x030c
  43
  44/* QUP_CONFIG fields */
  45#define QUP_CONFIG_SPI_MODE		(1 << 8)
  46#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
  47#define QUP_CONFIG_NO_INPUT		BIT(7)
  48#define QUP_CONFIG_NO_OUTPUT		BIT(6)
  49#define QUP_CONFIG_N			0x001f
  50
  51/* QUP_STATE fields */
  52#define QUP_STATE_VALID			BIT(2)
  53#define QUP_STATE_RESET			0
  54#define QUP_STATE_RUN			1
  55#define QUP_STATE_PAUSE			3
  56#define QUP_STATE_MASK			3
  57#define QUP_STATE_CLEAR			2
  58
  59#define QUP_HW_VERSION_2_1_1		0x20010001
  60
  61/* QUP_IO_M_MODES fields */
  62#define QUP_IO_M_PACK_EN		BIT(15)
  63#define QUP_IO_M_UNPACK_EN		BIT(14)
  64#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
  65#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
  66#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
  67#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
  68
  69#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
  70#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
  71#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
  72#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)
  73
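/* I/O modes: FIFO and BLOCK are PIO (interrupt driven); DMOV and BAM are DMA */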
  74#define QUP_IO_M_MODE_FIFO		0
  75#define QUP_IO_M_MODE_BLOCK		1
  76#define QUP_IO_M_MODE_DMOV		2
  77#define QUP_IO_M_MODE_BAM		3
  78
  79/* QUP_OPERATIONAL fields */
  80#define QUP_OP_IN_BLOCK_READ_REQ	BIT(13)
  81#define QUP_OP_OUT_BLOCK_WRITE_REQ	BIT(12)
  82#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
  83#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
  84#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
  85#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
  86#define QUP_OP_IN_FIFO_FULL		BIT(7)
  87#define QUP_OP_OUT_FIFO_FULL		BIT(6)
  88#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
  89#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)
  90
  91/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
  92#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
  93#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
  94#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
  95#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)
  96
  97/* SPI_CONFIG fields */
  98#define SPI_CONFIG_HS_MODE		BIT(10)
  99#define SPI_CONFIG_INPUT_FIRST		BIT(9)
 100#define SPI_CONFIG_LOOPBACK		BIT(8)
 101
 102/* SPI_IO_CONTROL fields */
 103#define SPI_IO_C_FORCE_CS		BIT(11)
 104#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
 105#define SPI_IO_C_MX_CS_MODE		BIT(8)
 106#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
 107#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
 108#define SPI_IO_C_CS_SELECT_MASK		0x000c
 109#define SPI_IO_C_TRISTATE_CS		BIT(1)
 110#define SPI_IO_C_NO_TRI_STATE		BIT(0)
 111
 112/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
 113#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
 114#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)
 115
 116#define SPI_NUM_CHIPSELECTS		4
 117
 118#define SPI_MAX_XFER			(SZ_64K - 64)
 119
  120/* high speed mode is when the bus rate is greater than 26MHz */
 121#define SPI_HS_MIN_RATE			26000000
 122#define SPI_MAX_RATE			50000000
 123
 124#define SPI_DELAY_THRESHOLD		1
 125#define SPI_DELAY_RETRY			10
 126
 127#define SPI_BUS_WIDTH			8
 128
 129struct spi_qup {
 130	void __iomem		*base;
 131	struct device		*dev;
 132	struct clk		*cclk;	/* core clock */
 133	struct clk		*iclk;	/* interface clock */
 134	struct icc_path		*icc_path; /* interconnect to RAM */
 135	int			irq;
 136	spinlock_t		lock;
 137
 138	int			in_fifo_sz;
 139	int			out_fifo_sz;
 140	int			in_blk_sz;
 141	int			out_blk_sz;
 142
 143	struct spi_transfer	*xfer;
 144	struct completion	done;
 145	int			error;
 146	int			w_size;	/* bytes per SPI word */
 147	int			n_words;
 148	int			tx_bytes;
 149	int			rx_bytes;
 150	const u8		*tx_buf;
 151	u8			*rx_buf;
 152	int			qup_v1;
 153
 154	int			mode;
 155	struct dma_slave_config	rx_conf;
 156	struct dma_slave_config	tx_conf;
 157
 158	u32			bw_speed_hz;
 159};
 160
 161static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
 162
 163static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
 164{
 165	u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
 166
 167	return (opflag & flag) != 0;
 168}
 169
 170static inline bool spi_qup_is_dma_xfer(int mode)
 171{
 172	if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
 173		return true;
 174
 175	return false;
 176}
 177
  178/* gets the transfer length in bytes */
 179static inline unsigned int spi_qup_len(struct spi_qup *controller)
 180{
 181	return controller->n_words * controller->w_size;
 182}
 183
 184static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
 185{
 186	u32 opstate = readl_relaxed(controller->base + QUP_STATE);
 187
 188	return opstate & QUP_STATE_VALID;
 189}
 190
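/*
 * Vote for interconnect bandwidth to RAM before a transfer.  Only a peak
 * bandwidth is requested (the average vote stays 0); the value is derived
 * from the SPI clock rate, and Bps_to_icc() converts bytes/s into the kB/s
 * units used by the interconnect framework.
 */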
 191static int spi_qup_vote_bw(struct spi_qup *controller, u32 speed_hz)
 192{
 193	u32 needed_peak_bw;
 194	int ret;
 195
 196	if (controller->bw_speed_hz == speed_hz)
 197		return 0;
 198
 199	needed_peak_bw = Bps_to_icc(speed_hz * SPI_BUS_WIDTH);
 200	ret = icc_set_bw(controller->icc_path, 0, needed_peak_bw);
 201	if (ret)
 202		return ret;
 203
 204	controller->bw_speed_hz = speed_hz;
 205	return 0;
 206}
 207
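/*
 * Move the QUP state machine to 'state' (RESET, RUN or PAUSE).  The
 * hardware only reports a consistent state while QUP_STATE_VALID is set,
 * so the state register is polled (with a short sleep) both before and
 * after the write.
 */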
 208static int spi_qup_set_state(struct spi_qup *controller, u32 state)
 209{
 210	unsigned long loop;
 211	u32 cur_state;
 212
 213	loop = 0;
 214	while (!spi_qup_is_valid_state(controller)) {
 215
 216		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
 217
 218		if (++loop > SPI_DELAY_RETRY)
 219			return -EIO;
 220	}
 221
 222	if (loop)
  223		dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
 224			loop, state);
 225
 226	cur_state = readl_relaxed(controller->base + QUP_STATE);
 227	/*
 228	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
 229	 * of (b10) are required
 230	 */
 231	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
 232	    (state == QUP_STATE_RESET)) {
 233		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
 234		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
 235	} else {
 236		cur_state &= ~QUP_STATE_MASK;
 237		cur_state |= state;
 238		writel_relaxed(cur_state, controller->base + QUP_STATE);
 239	}
 240
 241	loop = 0;
 242	while (!spi_qup_is_valid_state(controller)) {
 243
 244		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
 245
 246		if (++loop > SPI_DELAY_RETRY)
 247			return -EIO;
 248	}
 249
 250	return 0;
 251}
 252
 253static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
 254{
 255	u8 *rx_buf = controller->rx_buf;
 256	int i, shift, num_bytes;
 257	u32 word;
 258
 259	for (; num_words; num_words--) {
 260
 261		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
 262
 263		num_bytes = min_t(int, spi_qup_len(controller) -
 264				       controller->rx_bytes,
 265				       controller->w_size);
 266
 267		if (!rx_buf) {
 268			controller->rx_bytes += num_bytes;
 269			continue;
 270		}
 271
 272		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
 273			/*
 274			 * The data format depends on bytes per SPI word:
 275			 *  4 bytes: 0x12345678
 276			 *  2 bytes: 0x00001234
 277			 *  1 byte : 0x00000012
 278			 */
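			/*
			 * e.g. with w_size == 2 and i == 0, shift is 8, so the
			 * first byte stored is (word >> 8) & 0xff: words are
			 * unpacked most-significant byte first.
			 */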
 279			shift = BITS_PER_BYTE;
 280			shift *= (controller->w_size - i - 1);
 281			rx_buf[controller->rx_bytes] = word >> shift;
 282		}
 283	}
 284}
 285
 286static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
 287{
 288	u32 remainder, words_per_block, num_words;
 289	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
 290
 291	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
 292				 controller->w_size);
 293	words_per_block = controller->in_blk_sz >> 2;
 294
 295	do {
 296		/* ACK by clearing service flag */
 297		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
 298			       controller->base + QUP_OPERATIONAL);
 299
 300		if (!remainder)
 301			goto exit;
 302
 303		if (is_block_mode) {
 304			num_words = (remainder > words_per_block) ?
 305					words_per_block : remainder;
 306		} else {
 307			if (!spi_qup_is_flag_set(controller,
 308						 QUP_OP_IN_FIFO_NOT_EMPTY))
 309				break;
 310
 311			num_words = 1;
 312		}
 313
 314		/* read up to the maximum transfer size available */
 315		spi_qup_read_from_fifo(controller, num_words);
 316
 317		remainder -= num_words;
 318
 319		/* if block mode, check to see if next block is available */
 320		if (is_block_mode && !spi_qup_is_flag_set(controller,
 321					QUP_OP_IN_BLOCK_READ_REQ))
 322			break;
 323
 324	} while (remainder);
 325
 326	/*
 327	 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
 328	 * reads, it has to be cleared again at the very end.  However, be sure
  329	 * to refresh the opflags value because MAX_INPUT_DONE_FLAG may now be
  330	 * present, which is used to determine if the transaction is complete.
 331	 */
 332exit:
 333	if (!remainder) {
 334		*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
 335		if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
 336			writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
 337				       controller->base + QUP_OPERATIONAL);
 338	}
 339}
 340
 341static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
 342{
 343	const u8 *tx_buf = controller->tx_buf;
 344	int i, num_bytes;
 345	u32 word, data;
 346
 347	for (; num_words; num_words--) {
 348		word = 0;
 349
 350		num_bytes = min_t(int, spi_qup_len(controller) -
 351				       controller->tx_bytes,
 352				       controller->w_size);
 353		if (tx_buf)
 354			for (i = 0; i < num_bytes; i++) {
 355				data = tx_buf[controller->tx_bytes + i];
 356				word |= data << (BITS_PER_BYTE * (3 - i));
 357			}
 358
 359		controller->tx_bytes += num_bytes;
 360
 361		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
 362	}
 363}
 364
 365static void spi_qup_dma_done(void *data)
 366{
 367	struct spi_qup *qup = data;
 368
 369	complete(&qup->done);
 370}
 371
 372static void spi_qup_write(struct spi_qup *controller)
 373{
 374	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
 375	u32 remainder, words_per_block, num_words;
 376
 377	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
 378				 controller->w_size);
 379	words_per_block = controller->out_blk_sz >> 2;
 380
 381	do {
 382		/* ACK by clearing service flag */
 383		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
 384			       controller->base + QUP_OPERATIONAL);
 385
 386		/* make sure the interrupt is valid */
 387		if (!remainder)
 388			return;
 389
 390		if (is_block_mode) {
 391			num_words = (remainder > words_per_block) ?
 392				words_per_block : remainder;
 393		} else {
 394			if (spi_qup_is_flag_set(controller,
 395						QUP_OP_OUT_FIFO_FULL))
 396				break;
 397
 398			num_words = 1;
 399		}
 400
 401		spi_qup_write_to_fifo(controller, num_words);
 402
 403		remainder -= num_words;
 404
 405		/* if block mode, check to see if next block is available */
 406		if (is_block_mode && !spi_qup_is_flag_set(controller,
 407					QUP_OP_OUT_BLOCK_WRITE_REQ))
 408			break;
 409
 410	} while (remainder);
 411}
 412
 413static int spi_qup_prep_sg(struct spi_controller *host, struct scatterlist *sgl,
 414			   unsigned int nents, enum dma_transfer_direction dir,
 415			   dma_async_tx_callback callback)
 416{
 417	struct spi_qup *qup = spi_controller_get_devdata(host);
 418	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
 419	struct dma_async_tx_descriptor *desc;
 420	struct dma_chan *chan;
 421	dma_cookie_t cookie;
 422
 423	if (dir == DMA_MEM_TO_DEV)
 424		chan = host->dma_tx;
 425	else
 426		chan = host->dma_rx;
 427
 428	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
 429	if (IS_ERR_OR_NULL(desc))
 430		return desc ? PTR_ERR(desc) : -EINVAL;
 431
 432	desc->callback = callback;
 433	desc->callback_param = qup;
 434
 435	cookie = dmaengine_submit(desc);
 436
 437	return dma_submit_error(cookie);
 438}
 439
 440static void spi_qup_dma_terminate(struct spi_controller *host,
 441				  struct spi_transfer *xfer)
 442{
 443	if (xfer->tx_buf)
 444		dmaengine_terminate_all(host->dma_tx);
 445	if (xfer->rx_buf)
 446		dmaengine_terminate_all(host->dma_rx);
 447}
 448
 449static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
 450				     u32 *nents)
 451{
 452	struct scatterlist *sg;
 453	u32 total = 0;
 454
 455	for (sg = sgl; sg; sg = sg_next(sg)) {
 456		unsigned int len = sg_dma_len(sg);
 457
 458		/* check for overflow as well as limit */
 459		if (((total + len) < total) || ((total + len) > max))
 460			break;
 461
 462		total += len;
 463		(*nents)++;
 464	}
 465
 466	return total;
 467}
 468
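/*
 * DMA path: the RX/TX scatterlists are consumed in chunks of at most
 * SPI_MAX_XFER bytes.  For each chunk the QUP is reconfigured, slave_sg
 * descriptors are issued on the relevant channels, and completion is
 * awaited before advancing the scatterlist cursors.
 */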
 469static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
 470			  unsigned long timeout)
 471{
 472	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 473	struct spi_controller *host = spi->controller;
 474	struct spi_qup *qup = spi_controller_get_devdata(host);
 475	struct scatterlist *tx_sgl, *rx_sgl;
 476	int ret;
 477
 478	ret = spi_qup_vote_bw(qup, xfer->speed_hz);
 479	if (ret) {
  480		dev_err(qup->dev, "failed to vote for ICC bandwidth: %d\n", ret);
 481		return -EIO;
 482	}
 483
 484	if (xfer->rx_buf)
 485		rx_done = spi_qup_dma_done;
 486	else if (xfer->tx_buf)
 487		tx_done = spi_qup_dma_done;
 488
 489	rx_sgl = xfer->rx_sg.sgl;
 490	tx_sgl = xfer->tx_sg.sgl;
 491
 492	do {
 493		u32 rx_nents = 0, tx_nents = 0;
 494
 495		if (rx_sgl)
 496			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
 497					SPI_MAX_XFER, &rx_nents) / qup->w_size;
 498		if (tx_sgl)
 499			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
 500					SPI_MAX_XFER, &tx_nents) / qup->w_size;
 501		if (!qup->n_words)
 502			return -EIO;
 503
 504		ret = spi_qup_io_config(spi, xfer);
 505		if (ret)
 506			return ret;
 507
 508		/* before issuing the descriptors, set the QUP to run */
 509		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 510		if (ret) {
 511			dev_warn(qup->dev, "cannot set RUN state\n");
 512			return ret;
 513		}
 514		if (rx_sgl) {
 515			ret = spi_qup_prep_sg(host, rx_sgl, rx_nents,
 516					      DMA_DEV_TO_MEM, rx_done);
 517			if (ret)
 518				return ret;
 519			dma_async_issue_pending(host->dma_rx);
 520		}
 521
 522		if (tx_sgl) {
 523			ret = spi_qup_prep_sg(host, tx_sgl, tx_nents,
 524					      DMA_MEM_TO_DEV, tx_done);
 525			if (ret)
 526				return ret;
 527
 528			dma_async_issue_pending(host->dma_tx);
 529		}
 530
 531		if (!wait_for_completion_timeout(&qup->done, timeout))
 532			return -ETIMEDOUT;
 533
 534		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
 535			;
 536		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
 537			;
 538
 539	} while (rx_sgl || tx_sgl);
 540
 541	return 0;
 542}
 543
 544static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
 545			  unsigned long timeout)
 546{
 547	struct spi_controller *host = spi->controller;
 548	struct spi_qup *qup = spi_controller_get_devdata(host);
 549	int ret, n_words, iterations, offset = 0;
 550
 551	n_words = qup->n_words;
 552	iterations = n_words / SPI_MAX_XFER; /* round down */
 553	qup->rx_buf = xfer->rx_buf;
 554	qup->tx_buf = xfer->tx_buf;
 555
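	/*
	 * Split the transfer into 'iterations' full chunks of SPI_MAX_XFER
	 * words plus one final chunk of n_words % SPI_MAX_XFER words; each
	 * chunk is configured and waited for separately below.
	 */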
 556	do {
 557		if (iterations)
 558			qup->n_words = SPI_MAX_XFER;
 559		else
 560			qup->n_words = n_words % SPI_MAX_XFER;
 561
 562		if (qup->tx_buf && offset)
 563			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;
 564
 565		if (qup->rx_buf && offset)
 566			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;
 567
 568		/*
  569		 * if the transaction is small enough, we need
  570		 * to fall back to FIFO mode
 571		 */
 572		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
 573			qup->mode = QUP_IO_M_MODE_FIFO;
 574
 575		ret = spi_qup_io_config(spi, xfer);
 576		if (ret)
 577			return ret;
 578
 579		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 580		if (ret) {
 581			dev_warn(qup->dev, "cannot set RUN state\n");
 582			return ret;
 583		}
 584
 585		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
 586		if (ret) {
 587			dev_warn(qup->dev, "cannot set PAUSE state\n");
 588			return ret;
 589		}
 590
 591		if (qup->mode == QUP_IO_M_MODE_FIFO)
 592			spi_qup_write(qup);
 593
 594		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 595		if (ret) {
 596			dev_warn(qup->dev, "cannot set RUN state\n");
 597			return ret;
 598		}
 599
 600		if (!wait_for_completion_timeout(&qup->done, timeout))
 601			return -ETIMEDOUT;
 602
 603		offset++;
 604	} while (iterations--);
 605
 606	return 0;
 607}
 608
 609static bool spi_qup_data_pending(struct spi_qup *controller)
 610{
 611	unsigned int remainder_tx, remainder_rx;
 612
 613	remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
 614				    controller->tx_bytes, controller->w_size);
 615
 616	remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
 617				    controller->rx_bytes, controller->w_size);
 618
 619	return remainder_tx || remainder_rx;
 620}
 621
 622static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 623{
 624	struct spi_qup *controller = dev_id;
 625	u32 opflags, qup_err, spi_err;
 626	int error = 0;
 627
 628	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
 629	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
 630	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
 631
 632	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
 633	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
 634
 635	if (qup_err) {
 636		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
 637			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
 638		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
 639			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
 640		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
 641			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
 642		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
 643			dev_warn(controller->dev, "INPUT_OVER_RUN\n");
 644
 645		error = -EIO;
 646	}
 647
 648	if (spi_err) {
 649		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
 650			dev_warn(controller->dev, "CLK_OVER_RUN\n");
 651		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
 652			dev_warn(controller->dev, "CLK_UNDER_RUN\n");
 653
 654		error = -EIO;
 655	}
 656
 657	spin_lock(&controller->lock);
 658	if (!controller->error)
 659		controller->error = error;
 660	spin_unlock(&controller->lock);
 661
 662	if (spi_qup_is_dma_xfer(controller->mode)) {
 663		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
 664	} else {
 665		if (opflags & QUP_OP_IN_SERVICE_FLAG)
 666			spi_qup_read(controller, &opflags);
 667
 668		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
 669			spi_qup_write(controller);
 670
 671		if (!spi_qup_data_pending(controller))
 672			complete(&controller->done);
 673	}
 674
 675	if (error)
 676		complete(&controller->done);
 677
 678	if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
 679		if (!spi_qup_is_dma_xfer(controller->mode)) {
 680			if (spi_qup_data_pending(controller))
 681				return IRQ_HANDLED;
 682		}
 683		complete(&controller->done);
 684	}
 685
 686	return IRQ_HANDLED;
 687}
 688
  689/* set the clock frequency and bits per word, and determine the transfer mode */
 690static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
 691{
 692	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
 693	int ret;
 694
 695	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
 696		dev_err(controller->dev, "too big size for loopback %d > %d\n",
 697			xfer->len, controller->in_fifo_sz);
 698		return -EIO;
 699	}
 700
 701	ret = dev_pm_opp_set_rate(controller->dev, xfer->speed_hz);
 702	if (ret) {
  703		dev_err(controller->dev, "failed to set frequency %d",
 704			xfer->speed_hz);
 705		return -EIO;
 706	}
 707
 708	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
 709	controller->n_words = xfer->len / controller->w_size;
 710
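	/*
	 * Choose the I/O mode: FIFO when the whole transfer fits in the input
	 * FIFO, BAM (DMA) when the core mapped the buffers for DMA (see
	 * spi_qup_can_dma()), and interrupt-driven BLOCK mode otherwise.
	 */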
 711	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
 712		controller->mode = QUP_IO_M_MODE_FIFO;
 713	else if (spi_xfer_is_dma_mapped(spi->controller, spi, xfer))
 714		controller->mode = QUP_IO_M_MODE_BAM;
 715	else
 716		controller->mode = QUP_IO_M_MODE_BLOCK;
 717
 718	return 0;
 719}
 720
  721/* prepare the QUP for an SPI transaction in the selected mode */
 722static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 723{
 724	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
 725	u32 config, iomode, control;
 726	unsigned long flags;
 727
 728	spin_lock_irqsave(&controller->lock, flags);
 729	controller->xfer     = xfer;
 730	controller->error    = 0;
 731	controller->rx_bytes = 0;
 732	controller->tx_bytes = 0;
 733	spin_unlock_irqrestore(&controller->lock, flags);
 734
 735
 736	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
 737		dev_err(controller->dev, "cannot set RESET state\n");
 738		return -EIO;
 739	}
 740
 741	switch (controller->mode) {
 742	case QUP_IO_M_MODE_FIFO:
 743		writel_relaxed(controller->n_words,
 744			       controller->base + QUP_MX_READ_CNT);
 745		writel_relaxed(controller->n_words,
 746			       controller->base + QUP_MX_WRITE_CNT);
 747		/* must be zero for FIFO */
 748		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
 749		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
 750		break;
 751	case QUP_IO_M_MODE_BAM:
 752		writel_relaxed(controller->n_words,
 753			       controller->base + QUP_MX_INPUT_CNT);
 754		writel_relaxed(controller->n_words,
 755			       controller->base + QUP_MX_OUTPUT_CNT);
 756		/* must be zero for BLOCK and BAM */
 757		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 758		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
 759
 760		if (!controller->qup_v1) {
 761			void __iomem *input_cnt;
 762
 763			input_cnt = controller->base + QUP_MX_INPUT_CNT;
 764			/*
 765			 * for DMA transfers, both QUP_MX_INPUT_CNT and
  766			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
 767			 * That case is a non-balanced transfer when there is
 768			 * only a rx_buf.
 769			 */
 770			if (xfer->tx_buf)
 771				writel_relaxed(0, input_cnt);
 772			else
 773				writel_relaxed(controller->n_words, input_cnt);
 774
 775			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
 776		}
 777		break;
 778	case QUP_IO_M_MODE_BLOCK:
 779		reinit_completion(&controller->done);
 780		writel_relaxed(controller->n_words,
 781			       controller->base + QUP_MX_INPUT_CNT);
 782		writel_relaxed(controller->n_words,
 783			       controller->base + QUP_MX_OUTPUT_CNT);
 784		/* must be zero for BLOCK and BAM */
 785		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 786		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
 787		break;
 788	default:
 789		dev_err(controller->dev, "unknown mode = %d\n",
 790				controller->mode);
 791		return -EIO;
 792	}
 793
 794	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
 795	/* Set input and output transfer mode */
 796	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
 797
 798	if (!spi_qup_is_dma_xfer(controller->mode))
 799		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
 800	else
 801		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
 802
 803	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
 804	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
 805
 806	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
 807
 808	control = readl_relaxed(controller->base + SPI_IO_CONTROL);
 809
 810	if (spi->mode & SPI_CPOL)
 811		control |= SPI_IO_C_CLK_IDLE_HIGH;
 812	else
 813		control &= ~SPI_IO_C_CLK_IDLE_HIGH;
 814
 815	writel_relaxed(control, controller->base + SPI_IO_CONTROL);
 816
 817	config = readl_relaxed(controller->base + SPI_CONFIG);
 818
 819	if (spi->mode & SPI_LOOP)
 820		config |= SPI_CONFIG_LOOPBACK;
 821	else
 822		config &= ~SPI_CONFIG_LOOPBACK;
 823
 824	if (spi->mode & SPI_CPHA)
 825		config &= ~SPI_CONFIG_INPUT_FIRST;
 826	else
 827		config |= SPI_CONFIG_INPUT_FIRST;
 828
 829	/*
  830	 * HS_MODE improves signal stability at high spi-clk rates,
  831	 * but is invalid in loopback mode.
 832	 */
 833	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
 834		config |= SPI_CONFIG_HS_MODE;
 835	else
 836		config &= ~SPI_CONFIG_HS_MODE;
 837
 838	writel_relaxed(config, controller->base + SPI_CONFIG);
 839
 840	config = readl_relaxed(controller->base + QUP_CONFIG);
 841	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
 842	config |= xfer->bits_per_word - 1;
 843	config |= QUP_CONFIG_SPI_MODE;
 844
 845	if (spi_qup_is_dma_xfer(controller->mode)) {
 846		if (!xfer->tx_buf)
 847			config |= QUP_CONFIG_NO_OUTPUT;
 848		if (!xfer->rx_buf)
 849			config |= QUP_CONFIG_NO_INPUT;
 850	}
 851
 852	writel_relaxed(config, controller->base + QUP_CONFIG);
 853
  854	/* only write to OPERATIONAL_MASK when the register is present */
 855	if (!controller->qup_v1) {
 856		u32 mask = 0;
 857
 858		/*
 859		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
 860		 * status change in BAM mode
 861		 */
 862
 863		if (spi_qup_is_dma_xfer(controller->mode))
 864			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
 865
 866		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
 867	}
 868
 869	return 0;
 870}
 871
 872static int spi_qup_transfer_one(struct spi_controller *host,
 873			      struct spi_device *spi,
 874			      struct spi_transfer *xfer)
 875{
 876	struct spi_qup *controller = spi_controller_get_devdata(host);
 877	unsigned long timeout, flags;
 878	int ret;
 879
 880	ret = spi_qup_io_prep(spi, xfer);
 881	if (ret)
 882		return ret;
 883
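	/*
	 * Rough transfer timeout: speed_hz / MSEC_PER_SEC is the bit rate per
	 * millisecond, the second step gives the milliseconds needed to clock
	 * the (SPI_MAX_XFER-capped) payload, and a 100x margin is applied
	 * before converting to jiffies.
	 */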
 884	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
 885	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
 886				     xfer->len) * 8, timeout);
 887	timeout = 100 * msecs_to_jiffies(timeout);
 888
 889	reinit_completion(&controller->done);
 890
 891	spin_lock_irqsave(&controller->lock, flags);
 892	controller->xfer     = xfer;
 893	controller->error    = 0;
 894	controller->rx_bytes = 0;
 895	controller->tx_bytes = 0;
 896	spin_unlock_irqrestore(&controller->lock, flags);
 897
 898	if (spi_qup_is_dma_xfer(controller->mode))
 899		ret = spi_qup_do_dma(spi, xfer, timeout);
 900	else
 901		ret = spi_qup_do_pio(spi, xfer, timeout);
 902
 903	spi_qup_set_state(controller, QUP_STATE_RESET);
 904	spin_lock_irqsave(&controller->lock, flags);
 905	if (!ret)
 906		ret = controller->error;
 907	spin_unlock_irqrestore(&controller->lock, flags);
 908
 909	if (ret && spi_qup_is_dma_xfer(controller->mode))
 910		spi_qup_dma_terminate(host, xfer);
 911
 912	return ret;
 913}
 914
 915static bool spi_qup_can_dma(struct spi_controller *host, struct spi_device *spi,
 916			    struct spi_transfer *xfer)
 917{
 918	struct spi_qup *qup = spi_controller_get_devdata(host);
 919	size_t dma_align = dma_get_cache_alignment();
 920	int n_words;
 921
 922	if (xfer->rx_buf) {
 923		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
 924		    IS_ERR_OR_NULL(host->dma_rx))
 925			return false;
 926		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
 927			return false;
 928	}
 929
 930	if (xfer->tx_buf) {
 931		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
 932		    IS_ERR_OR_NULL(host->dma_tx))
 933			return false;
 934		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
 935			return false;
 936	}
 937
 938	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
 939	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
 940		return false;
 941
 942	return true;
 943}
 944
 945static void spi_qup_release_dma(struct spi_controller *host)
 946{
 947	if (!IS_ERR_OR_NULL(host->dma_rx))
 948		dma_release_channel(host->dma_rx);
 949	if (!IS_ERR_OR_NULL(host->dma_tx))
 950		dma_release_channel(host->dma_tx);
 951}
 952
 953static int spi_qup_init_dma(struct spi_controller *host, resource_size_t base)
 954{
 955	struct spi_qup *spi = spi_controller_get_devdata(host);
 956	struct dma_slave_config *rx_conf = &spi->rx_conf,
 957				*tx_conf = &spi->tx_conf;
 958	struct device *dev = spi->dev;
 959	int ret;
 960
 961	/* allocate dma resources, if available */
 962	host->dma_rx = dma_request_chan(dev, "rx");
 963	if (IS_ERR(host->dma_rx))
 964		return PTR_ERR(host->dma_rx);
 965
 966	host->dma_tx = dma_request_chan(dev, "tx");
 967	if (IS_ERR(host->dma_tx)) {
 968		ret = PTR_ERR(host->dma_tx);
 969		goto err_tx;
 970	}
 971
 972	/* set DMA parameters */
 973	rx_conf->direction = DMA_DEV_TO_MEM;
 974	rx_conf->device_fc = 1;
 975	rx_conf->src_addr = base + QUP_INPUT_FIFO;
 976	rx_conf->src_maxburst = spi->in_blk_sz;
 977
 978	tx_conf->direction = DMA_MEM_TO_DEV;
 979	tx_conf->device_fc = 1;
 980	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
 981	tx_conf->dst_maxburst = spi->out_blk_sz;
 982
 983	ret = dmaengine_slave_config(host->dma_rx, rx_conf);
 984	if (ret) {
 985		dev_err(dev, "failed to configure RX channel\n");
 986		goto err;
 987	}
 988
 989	ret = dmaengine_slave_config(host->dma_tx, tx_conf);
 990	if (ret) {
 991		dev_err(dev, "failed to configure TX channel\n");
 992		goto err;
 993	}
 994
 995	return 0;
 996
 997err:
 998	dma_release_channel(host->dma_tx);
 999err_tx:
1000	dma_release_channel(host->dma_rx);
1001	return ret;
1002}
1003
1004static void spi_qup_set_cs(struct spi_device *spi, bool val)
1005{
1006	struct spi_qup *controller;
1007	u32 spi_ioc;
1008	u32 spi_ioc_orig;
1009
1010	controller = spi_controller_get_devdata(spi->controller);
1011	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
1012	spi_ioc_orig = spi_ioc;
1013	if (!val)
1014		spi_ioc |= SPI_IO_C_FORCE_CS;
1015	else
1016		spi_ioc &= ~SPI_IO_C_FORCE_CS;
1017
1018	if (spi_ioc != spi_ioc_orig)
1019		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
1020}
1021
1022static int spi_qup_probe(struct platform_device *pdev)
1023{
1024	struct spi_controller *host;
1025	struct icc_path *icc_path;
1026	struct clk *iclk, *cclk;
1027	struct spi_qup *controller;
1028	struct resource *res;
1029	struct device *dev;
1030	void __iomem *base;
1031	u32 max_freq, iomode, num_cs;
1032	int ret, irq, size;
1033
1034	dev = &pdev->dev;
1035	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1036	if (IS_ERR(base))
1037		return PTR_ERR(base);
1038
1039	irq = platform_get_irq(pdev, 0);
1040	if (irq < 0)
1041		return irq;
1042
1043	cclk = devm_clk_get(dev, "core");
1044	if (IS_ERR(cclk))
1045		return PTR_ERR(cclk);
1046
1047	iclk = devm_clk_get(dev, "iface");
1048	if (IS_ERR(iclk))
1049		return PTR_ERR(iclk);
1050
1051	icc_path = devm_of_icc_get(dev, NULL);
1052	if (IS_ERR(icc_path))
1053		return dev_err_probe(dev, PTR_ERR(icc_path),
1054				     "failed to get interconnect path\n");
1055
 1056	/* This is an optional parameter */
1057	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
1058		max_freq = SPI_MAX_RATE;
1059
1060	if (!max_freq || max_freq > SPI_MAX_RATE) {
1061		dev_err(dev, "invalid clock frequency %d\n", max_freq);
1062		return -ENXIO;
1063	}
1064
1065	ret = devm_pm_opp_set_clkname(dev, "core");
1066	if (ret)
1067		return ret;
1068
1069	/* OPP table is optional */
1070	ret = devm_pm_opp_of_add_table(dev);
1071	if (ret && ret != -ENODEV)
1072		return dev_err_probe(dev, ret, "invalid OPP table\n");
1073
1074	host = spi_alloc_host(dev, sizeof(struct spi_qup));
1075	if (!host) {
1076		dev_err(dev, "cannot allocate host\n");
1077		return -ENOMEM;
1078	}
1079
 1080	/* use num-cs if present and in range, otherwise the default */
1081	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
1082	    num_cs > SPI_NUM_CHIPSELECTS)
1083		host->num_chipselect = SPI_NUM_CHIPSELECTS;
1084	else
1085		host->num_chipselect = num_cs;
1086
1087	host->use_gpio_descriptors = true;
1088	host->max_native_cs = SPI_NUM_CHIPSELECTS;
1089	host->bus_num = pdev->id;
1090	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
1091	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1092	host->max_speed_hz = max_freq;
1093	host->transfer_one = spi_qup_transfer_one;
1094	host->dev.of_node = pdev->dev.of_node;
1095	host->auto_runtime_pm = true;
1096	host->dma_alignment = dma_get_cache_alignment();
1097	host->max_dma_len = SPI_MAX_XFER;
1098
1099	platform_set_drvdata(pdev, host);
1100
1101	controller = spi_controller_get_devdata(host);
1102
1103	controller->dev = dev;
1104	controller->base = base;
1105	controller->iclk = iclk;
1106	controller->cclk = cclk;
1107	controller->icc_path = icc_path;
1108	controller->irq = irq;
1109
1110	ret = spi_qup_init_dma(host, res->start);
1111	if (ret == -EPROBE_DEFER)
1112		goto error;
1113	else if (!ret)
1114		host->can_dma = spi_qup_can_dma;
1115
1116	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);
1117
1118	if (!controller->qup_v1)
1119		host->set_cs = spi_qup_set_cs;
1120
1121	spin_lock_init(&controller->lock);
1122	init_completion(&controller->done);
1123
1124	ret = clk_prepare_enable(cclk);
1125	if (ret) {
1126		dev_err(dev, "cannot enable core clock\n");
1127		goto error_dma;
1128	}
1129
1130	ret = clk_prepare_enable(iclk);
1131	if (ret) {
1132		clk_disable_unprepare(cclk);
1133		dev_err(dev, "cannot enable iface clock\n");
1134		goto error_dma;
1135	}
1136
1137	iomode = readl_relaxed(base + QUP_IO_M_MODES);
1138
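	/*
	 * Decode the block and FIFO geometry advertised in QUP_IO_M_MODES:
	 * a block-size field of 0 means 4 bytes, otherwise size * 16 bytes,
	 * and each FIFO holds blk_sz * 2^(size + 1) bytes.
	 */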
1139	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
1140	if (size)
1141		controller->out_blk_sz = size * 16;
1142	else
1143		controller->out_blk_sz = 4;
1144
1145	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
1146	if (size)
1147		controller->in_blk_sz = size * 16;
1148	else
1149		controller->in_blk_sz = 4;
1150
1151	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
1152	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
1153
1154	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
1155	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
1156
1157	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
1158		 controller->in_blk_sz, controller->in_fifo_sz,
1159		 controller->out_blk_sz, controller->out_fifo_sz);
1160
1161	writel_relaxed(1, base + QUP_SW_RESET);
1162
1163	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1164	if (ret) {
1165		dev_err(dev, "cannot set RESET state\n");
1166		goto error_clk;
1167	}
1168
1169	writel_relaxed(0, base + QUP_OPERATIONAL);
1170	writel_relaxed(0, base + QUP_IO_M_MODES);
1171
1172	if (!controller->qup_v1)
1173		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
1174
1175	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
1176		       base + SPI_ERROR_FLAGS_EN);
1177
 1178	/* on QUP v1, enable all error interrupts except INPUT_OVER_RUN */
1179	if (controller->qup_v1)
1180		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
1181			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
1182			base + QUP_ERROR_FLAGS_EN);
1183
1184	writel_relaxed(0, base + SPI_CONFIG);
1185	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
1186
1187	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
1188			       IRQF_TRIGGER_HIGH, pdev->name, controller);
1189	if (ret)
1190		goto error_clk;
1191
1192	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
1193	pm_runtime_use_autosuspend(dev);
1194	pm_runtime_set_active(dev);
1195	pm_runtime_enable(dev);
1196
1197	ret = devm_spi_register_controller(dev, host);
1198	if (ret)
1199		goto disable_pm;
1200
1201	return 0;
1202
1203disable_pm:
1204	pm_runtime_disable(&pdev->dev);
1205error_clk:
1206	clk_disable_unprepare(cclk);
1207	clk_disable_unprepare(iclk);
1208error_dma:
1209	spi_qup_release_dma(host);
1210error:
1211	spi_controller_put(host);
1212	return ret;
1213}
1214
1215#ifdef CONFIG_PM
1216static int spi_qup_pm_suspend_runtime(struct device *device)
1217{
1218	struct spi_controller *host = dev_get_drvdata(device);
1219	struct spi_qup *controller = spi_controller_get_devdata(host);
1220	u32 config;
1221
 1222	/* Enable clock auto gating */
1223	config = readl(controller->base + QUP_CONFIG);
1224	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
1225	writel_relaxed(config, controller->base + QUP_CONFIG);
1226
1227	clk_disable_unprepare(controller->cclk);
1228	spi_qup_vote_bw(controller, 0);
1229	clk_disable_unprepare(controller->iclk);
1230
1231	return 0;
1232}
1233
1234static int spi_qup_pm_resume_runtime(struct device *device)
1235{
1236	struct spi_controller *host = dev_get_drvdata(device);
1237	struct spi_qup *controller = spi_controller_get_devdata(host);
1238	u32 config;
1239	int ret;
1240
1241	ret = clk_prepare_enable(controller->iclk);
1242	if (ret)
1243		return ret;
1244
1245	ret = clk_prepare_enable(controller->cclk);
1246	if (ret) {
1247		clk_disable_unprepare(controller->iclk);
1248		return ret;
1249	}
1250
 1251	/* Disable clock auto gating */
1252	config = readl_relaxed(controller->base + QUP_CONFIG);
1253	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
1254	writel_relaxed(config, controller->base + QUP_CONFIG);
1255	return 0;
1256}
1257#endif /* CONFIG_PM */
1258
1259#ifdef CONFIG_PM_SLEEP
1260static int spi_qup_suspend(struct device *device)
1261{
1262	struct spi_controller *host = dev_get_drvdata(device);
1263	struct spi_qup *controller = spi_controller_get_devdata(host);
1264	int ret;
1265
1266	if (pm_runtime_suspended(device)) {
1267		ret = spi_qup_pm_resume_runtime(device);
1268		if (ret)
1269			return ret;
1270	}
1271	ret = spi_controller_suspend(host);
1272	if (ret)
1273		return ret;
1274
1275	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1276	if (ret)
1277		return ret;
1278
1279	clk_disable_unprepare(controller->cclk);
1280	spi_qup_vote_bw(controller, 0);
1281	clk_disable_unprepare(controller->iclk);
1282	return 0;
1283}
1284
1285static int spi_qup_resume(struct device *device)
1286{
1287	struct spi_controller *host = dev_get_drvdata(device);
1288	struct spi_qup *controller = spi_controller_get_devdata(host);
1289	int ret;
1290
1291	ret = clk_prepare_enable(controller->iclk);
1292	if (ret)
1293		return ret;
1294
1295	ret = clk_prepare_enable(controller->cclk);
1296	if (ret) {
1297		clk_disable_unprepare(controller->iclk);
1298		return ret;
1299	}
1300
1301	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1302	if (ret)
1303		goto disable_clk;
1304
1305	ret = spi_controller_resume(host);
1306	if (ret)
1307		goto disable_clk;
1308
1309	return 0;
1310
1311disable_clk:
1312	clk_disable_unprepare(controller->cclk);
1313	clk_disable_unprepare(controller->iclk);
1314	return ret;
1315}
1316#endif /* CONFIG_PM_SLEEP */
1317
1318static void spi_qup_remove(struct platform_device *pdev)
1319{
1320	struct spi_controller *host = dev_get_drvdata(&pdev->dev);
1321	struct spi_qup *controller = spi_controller_get_devdata(host);
1322	int ret;
1323
1324	ret = pm_runtime_get_sync(&pdev->dev);
1325
1326	if (ret >= 0) {
1327		ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1328		if (ret)
1329			dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
1330				 ERR_PTR(ret));
1331
1332		clk_disable_unprepare(controller->cclk);
1333		clk_disable_unprepare(controller->iclk);
1334	} else {
1335		dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
1336			 ERR_PTR(ret));
1337	}
1338
1339	spi_qup_release_dma(host);
1340
1341	pm_runtime_put_noidle(&pdev->dev);
1342	pm_runtime_disable(&pdev->dev);
1343}
1344
1345static const struct of_device_id spi_qup_dt_match[] = {
1346	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
1347	{ .compatible = "qcom,spi-qup-v2.1.1", },
1348	{ .compatible = "qcom,spi-qup-v2.2.1", },
1349	{ }
1350};
1351MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
1352
1353static const struct dev_pm_ops spi_qup_dev_pm_ops = {
1354	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
1355	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
1356			   spi_qup_pm_resume_runtime,
1357			   NULL)
1358};
1359
1360static struct platform_driver spi_qup_driver = {
1361	.driver = {
1362		.name		= "spi_qup",
1363		.pm		= &spi_qup_dev_pm_ops,
1364		.of_match_table = spi_qup_dt_match,
1365	},
1366	.probe = spi_qup_probe,
1367	.remove = spi_qup_remove,
1368};
1369module_platform_driver(spi_qup_driver);
1370
1371MODULE_DESCRIPTION("Qualcomm SPI controller with QUP interface");
1372MODULE_LICENSE("GPL v2");
1373MODULE_ALIAS("platform:spi_qup");
v4.6
 
   1/*
   2 * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License rev 2 and
   6 * only rev 2 as published by the free Software foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or fITNESS fOR A PARTICULAR PURPOSE.  See the
  11 * GNU General Public License for more details.
  12 */
  13
  14#include <linux/clk.h>
  15#include <linux/delay.h>
 
 
  16#include <linux/err.h>
 
  17#include <linux/interrupt.h>
  18#include <linux/io.h>
  19#include <linux/list.h>
  20#include <linux/module.h>
  21#include <linux/of.h>
  22#include <linux/platform_device.h>
 
  23#include <linux/pm_runtime.h>
  24#include <linux/spi/spi.h>
  25#include <linux/dmaengine.h>
  26#include <linux/dma-mapping.h>
  27
  28#define QUP_CONFIG			0x0000
  29#define QUP_STATE			0x0004
  30#define QUP_IO_M_MODES			0x0008
  31#define QUP_SW_RESET			0x000c
  32#define QUP_OPERATIONAL			0x0018
  33#define QUP_ERROR_FLAGS			0x001c
  34#define QUP_ERROR_FLAGS_EN		0x0020
  35#define QUP_OPERATIONAL_MASK		0x0028
  36#define QUP_HW_VERSION			0x0030
  37#define QUP_MX_OUTPUT_CNT		0x0100
  38#define QUP_OUTPUT_FIFO			0x0110
  39#define QUP_MX_WRITE_CNT		0x0150
  40#define QUP_MX_INPUT_CNT		0x0200
  41#define QUP_MX_READ_CNT			0x0208
  42#define QUP_INPUT_FIFO			0x0218
  43
  44#define SPI_CONFIG			0x0300
  45#define SPI_IO_CONTROL			0x0304
  46#define SPI_ERROR_FLAGS			0x0308
  47#define SPI_ERROR_FLAGS_EN		0x030c
  48
  49/* QUP_CONFIG fields */
  50#define QUP_CONFIG_SPI_MODE		(1 << 8)
  51#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
  52#define QUP_CONFIG_NO_INPUT		BIT(7)
  53#define QUP_CONFIG_NO_OUTPUT		BIT(6)
  54#define QUP_CONFIG_N			0x001f
  55
  56/* QUP_STATE fields */
  57#define QUP_STATE_VALID			BIT(2)
  58#define QUP_STATE_RESET			0
  59#define QUP_STATE_RUN			1
  60#define QUP_STATE_PAUSE			3
  61#define QUP_STATE_MASK			3
  62#define QUP_STATE_CLEAR			2
  63
  64#define QUP_HW_VERSION_2_1_1		0x20010001
  65
  66/* QUP_IO_M_MODES fields */
  67#define QUP_IO_M_PACK_EN		BIT(15)
  68#define QUP_IO_M_UNPACK_EN		BIT(14)
  69#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
  70#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
  71#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
  72#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
  73
  74#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
  75#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
  76#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
  77#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)
  78
  79#define QUP_IO_M_MODE_FIFO		0
  80#define QUP_IO_M_MODE_BLOCK		1
  81#define QUP_IO_M_MODE_DMOV		2
  82#define QUP_IO_M_MODE_BAM		3
  83
  84/* QUP_OPERATIONAL fields */
 
 
  85#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
  86#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
  87#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
  88#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
  89#define QUP_OP_IN_FIFO_FULL		BIT(7)
  90#define QUP_OP_OUT_FIFO_FULL		BIT(6)
  91#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
  92#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)
  93
  94/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
  95#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
  96#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
  97#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
  98#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)
  99
 100/* SPI_CONFIG fields */
 101#define SPI_CONFIG_HS_MODE		BIT(10)
 102#define SPI_CONFIG_INPUT_FIRST		BIT(9)
 103#define SPI_CONFIG_LOOPBACK		BIT(8)
 104
 105/* SPI_IO_CONTROL fields */
 106#define SPI_IO_C_FORCE_CS		BIT(11)
 107#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
 108#define SPI_IO_C_MX_CS_MODE		BIT(8)
 109#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
 110#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
 111#define SPI_IO_C_CS_SELECT_MASK		0x000c
 112#define SPI_IO_C_TRISTATE_CS		BIT(1)
 113#define SPI_IO_C_NO_TRI_STATE		BIT(0)
 114
 115/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
 116#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
 117#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)
 118
 119#define SPI_NUM_CHIPSELECTS		4
 120
 121#define SPI_MAX_DMA_XFER		(SZ_64K - 64)
 122
 123/* high speed mode is when bus rate is greater then 26MHz */
 124#define SPI_HS_MIN_RATE			26000000
 125#define SPI_MAX_RATE			50000000
 126
 127#define SPI_DELAY_THRESHOLD		1
 128#define SPI_DELAY_RETRY			10
 129
 
 
 130struct spi_qup {
 131	void __iomem		*base;
 132	struct device		*dev;
 133	struct clk		*cclk;	/* core clock */
 134	struct clk		*iclk;	/* interface clock */
 
 135	int			irq;
 136	spinlock_t		lock;
 137
 138	int			in_fifo_sz;
 139	int			out_fifo_sz;
 140	int			in_blk_sz;
 141	int			out_blk_sz;
 142
 143	struct spi_transfer	*xfer;
 144	struct completion	done;
 145	int			error;
 146	int			w_size;	/* bytes per SPI word */
 147	int			n_words;
 148	int			tx_bytes;
 149	int			rx_bytes;
 
 
 150	int			qup_v1;
 151
 152	int			use_dma;
 153	struct dma_slave_config	rx_conf;
 154	struct dma_slave_config	tx_conf;
 
 
 155};
 156
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 157
 158static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
 159{
 160	u32 opstate = readl_relaxed(controller->base + QUP_STATE);
 161
 162	return opstate & QUP_STATE_VALID;
 163}
 164
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 165static int spi_qup_set_state(struct spi_qup *controller, u32 state)
 166{
 167	unsigned long loop;
 168	u32 cur_state;
 169
 170	loop = 0;
 171	while (!spi_qup_is_valid_state(controller)) {
 172
 173		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
 174
 175		if (++loop > SPI_DELAY_RETRY)
 176			return -EIO;
 177	}
 178
 179	if (loop)
 180		dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
 181			loop, state);
 182
 183	cur_state = readl_relaxed(controller->base + QUP_STATE);
 184	/*
 185	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
 186	 * of (b10) are required
 187	 */
 188	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
 189	    (state == QUP_STATE_RESET)) {
 190		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
 191		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
 192	} else {
 193		cur_state &= ~QUP_STATE_MASK;
 194		cur_state |= state;
 195		writel_relaxed(cur_state, controller->base + QUP_STATE);
 196	}
 197
 198	loop = 0;
 199	while (!spi_qup_is_valid_state(controller)) {
 200
 201		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
 202
 203		if (++loop > SPI_DELAY_RETRY)
 204			return -EIO;
 205	}
 206
 207	return 0;
 208}
 209
 210static void spi_qup_fifo_read(struct spi_qup *controller,
 211			    struct spi_transfer *xfer)
 212{
 213	u8 *rx_buf = xfer->rx_buf;
 214	u32 word, state;
 215	int idx, shift, w_size;
 216
 217	w_size = controller->w_size;
 218
 219	while (controller->rx_bytes < xfer->len) {
 220
 221		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
 222		if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
 223			break;
 224
 225		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
 
 
 226
 227		if (!rx_buf) {
 228			controller->rx_bytes += w_size;
 229			continue;
 230		}
 231
 232		for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
 233			/*
 234			 * The data format depends on bytes per SPI word:
 235			 *  4 bytes: 0x12345678
 236			 *  2 bytes: 0x00001234
 237			 *  1 byte : 0x00000012
 238			 */
 239			shift = BITS_PER_BYTE;
 240			shift *= (w_size - idx - 1);
 241			rx_buf[controller->rx_bytes] = word >> shift;
 242		}
 243	}
 244}
 245
 246static void spi_qup_fifo_write(struct spi_qup *controller,
 247			    struct spi_transfer *xfer)
 248{
 249	const u8 *tx_buf = xfer->tx_buf;
 250	u32 word, state, data;
 251	int idx, w_size;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 252
 253	w_size = controller->w_size;
 
 254
 255	while (controller->tx_bytes < xfer->len) {
 256
 257		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
 258		if (state & QUP_OP_OUT_FIFO_FULL)
 
 259			break;
 260
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 261		word = 0;
 262		for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
 263
 264			if (!tx_buf) {
 265				controller->tx_bytes += w_size;
 266				break;
 
 
 
 
 267			}
 268
 269			data = tx_buf[controller->tx_bytes];
 270			word |= data << (BITS_PER_BYTE * (3 - idx));
 271		}
 272
 273		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
 274	}
 275}
 276
 277static void spi_qup_dma_done(void *data)
 278{
 279	struct spi_qup *qup = data;
 280
 281	complete(&qup->done);
 282}
 283
 284static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
 285			   enum dma_transfer_direction dir,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 286			   dma_async_tx_callback callback)
 287{
 288	struct spi_qup *qup = spi_master_get_devdata(master);
 289	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
 290	struct dma_async_tx_descriptor *desc;
 291	struct scatterlist *sgl;
 292	struct dma_chan *chan;
 293	dma_cookie_t cookie;
 294	unsigned int nents;
 295
 296	if (dir == DMA_MEM_TO_DEV) {
 297		chan = master->dma_tx;
 298		nents = xfer->tx_sg.nents;
 299		sgl = xfer->tx_sg.sgl;
 300	} else {
 301		chan = master->dma_rx;
 302		nents = xfer->rx_sg.nents;
 303		sgl = xfer->rx_sg.sgl;
 304	}
 305
 306	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
 307	if (!desc)
 308		return -EINVAL;
 309
 310	desc->callback = callback;
 311	desc->callback_param = qup;
 312
 313	cookie = dmaengine_submit(desc);
 314
 315	return dma_submit_error(cookie);
 316}
 317
 318static void spi_qup_dma_terminate(struct spi_master *master,
 319				  struct spi_transfer *xfer)
 320{
 321	if (xfer->tx_buf)
 322		dmaengine_terminate_all(master->dma_tx);
 323	if (xfer->rx_buf)
 324		dmaengine_terminate_all(master->dma_rx);
 325}
 326
 327static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 328{
 329	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 
 
 
 330	int ret;
 331
 
 
 
 
 
 
 332	if (xfer->rx_buf)
 333		rx_done = spi_qup_dma_done;
 334	else if (xfer->tx_buf)
 335		tx_done = spi_qup_dma_done;
 336
 337	if (xfer->rx_buf) {
 338		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 339		if (ret)
 340			return ret;
 341
 342		dma_async_issue_pending(master->dma_rx);
 343	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 344
 345	if (xfer->tx_buf) {
 346		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
 347		if (ret)
 348			return ret;
 349
 350		dma_async_issue_pending(master->dma_tx);
 351	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 352
 353	return 0;
 354}
 355
 356static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
 357{
 358	struct spi_qup *qup = spi_master_get_devdata(master);
 359	int ret;
 360
 361	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
 362	if (ret) {
 363		dev_warn(qup->dev, "cannot set RUN state\n");
 364		return ret;
 365	}
 366
 367	ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
 368	if (ret) {
 369		dev_warn(qup->dev, "cannot set PAUSE state\n");
 370		return ret;
 371	}
 372
 373	spi_qup_fifo_write(qup, xfer);
 374
 375	return 0;
 376}
 377
 378static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 379{
 380	struct spi_qup *controller = dev_id;
 381	struct spi_transfer *xfer;
 382	u32 opflags, qup_err, spi_err;
 383	unsigned long flags;
 384	int error = 0;
 385
 386	spin_lock_irqsave(&controller->lock, flags);
 387	xfer = controller->xfer;
 388	controller->xfer = NULL;
 389	spin_unlock_irqrestore(&controller->lock, flags);
 390
 391	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
 392	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
 393	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
 394
 395	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
 396	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
 397	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
 398
 399	if (!xfer) {
 400		dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
 401				    qup_err, spi_err, opflags);
 402		return IRQ_HANDLED;
 403	}
 404
 405	if (qup_err) {
 406		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
 407			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
 408		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
 409			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
 410		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
 411			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
 412		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
 413			dev_warn(controller->dev, "INPUT_OVER_RUN\n");
 414
 415		error = -EIO;
 416	}
 417
 418	if (spi_err) {
 419		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
 420			dev_warn(controller->dev, "CLK_OVER_RUN\n");
 421		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
 422			dev_warn(controller->dev, "CLK_UNDER_RUN\n");
 423
 424		error = -EIO;
 425	}
 426
 427	if (!controller->use_dma) {
 
 
 
 
 
 
 
 428		if (opflags & QUP_OP_IN_SERVICE_FLAG)
 429			spi_qup_fifo_read(controller, xfer);
 430
 431		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
 432			spi_qup_fifo_write(controller, xfer);
 
 
 
 433	}
 434
 435	spin_lock_irqsave(&controller->lock, flags);
 436	controller->error = error;
 437	controller->xfer = xfer;
 438	spin_unlock_irqrestore(&controller->lock, flags);
 439
 440	if (controller->rx_bytes == xfer->len || error)
 
 
 
 
 441		complete(&controller->done);
 
 442
 443	return IRQ_HANDLED;
 444}
 445
 446static u32
 447spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer)
 448{
 449	struct spi_qup *qup = spi_master_get_devdata(master);
 450	u32 mode;
 451
 452	qup->w_size = 4;
 
 
 
 
 453
 454	if (xfer->bits_per_word <= 8)
 455		qup->w_size = 1;
 456	else if (xfer->bits_per_word <= 16)
 457		qup->w_size = 2;
 
 
 458
 459	qup->n_words = xfer->len / qup->w_size;
 
 460
 461	if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
 462		mode = QUP_IO_M_MODE_FIFO;
 
 
 463	else
 464		mode = QUP_IO_M_MODE_BLOCK;
 465
 466	return mode;
 467}
 468
 469/* set clock freq ... bits per word */
 470static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 471{
 472	struct spi_qup *controller = spi_master_get_devdata(spi->master);
 473	u32 config, iomode, mode, control;
 474	int ret, n_words;
 475
 476	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
 477		dev_err(controller->dev, "too big size for loopback %d > %d\n",
 478			xfer->len, controller->in_fifo_sz);
 479		return -EIO;
 480	}
 
 481
 482	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
 483	if (ret) {
 484		dev_err(controller->dev, "fail to set frequency %d",
 485			xfer->speed_hz);
 486		return -EIO;
 487	}
 488
 489	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
 490		dev_err(controller->dev, "cannot set RESET state\n");
 491		return -EIO;
 492	}
 493
 494	mode = spi_qup_get_mode(spi->master, xfer);
 495	n_words = controller->n_words;
 496
 497	if (mode == QUP_IO_M_MODE_FIFO) {
 498		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
 499		writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
 500		/* must be zero for FIFO */
 501		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
 502		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
 503	} else if (!controller->use_dma) {
 504		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
 505		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
 506		/* must be zero for BLOCK and BAM */
 507		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 508		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
 509	} else {
 510		mode = QUP_IO_M_MODE_BAM;
 511		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 512		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
 513
 514		if (!controller->qup_v1) {
 515			void __iomem *input_cnt;
 516
 517			input_cnt = controller->base + QUP_MX_INPUT_CNT;
 518			/*
 519			 * for DMA transfers, both QUP_MX_INPUT_CNT and
  520			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
 521			 * That case is a non-balanced transfer when there is
 522			 * only a rx_buf.
 523			 */
 524			if (xfer->tx_buf)
 525				writel_relaxed(0, input_cnt);
 526			else
 527				writel_relaxed(n_words, input_cnt);
 528
 529			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
 530		}
 531	}
 532
 533	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
 534	/* Set input and output transfer mode */
 535	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
 536
 537	if (!controller->use_dma)
 538		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
 539	else
 540		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
 541
 542	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
 543	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
 544
 545	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
 546
 547	control = readl_relaxed(controller->base + SPI_IO_CONTROL);
 548
 549	if (spi->mode & SPI_CPOL)
 550		control |= SPI_IO_C_CLK_IDLE_HIGH;
 551	else
 552		control &= ~SPI_IO_C_CLK_IDLE_HIGH;
 553
 554	writel_relaxed(control, controller->base + SPI_IO_CONTROL);
 555
 556	config = readl_relaxed(controller->base + SPI_CONFIG);
 557
 558	if (spi->mode & SPI_LOOP)
 559		config |= SPI_CONFIG_LOOPBACK;
 560	else
 561		config &= ~SPI_CONFIG_LOOPBACK;
 562
 563	if (spi->mode & SPI_CPHA)
 564		config &= ~SPI_CONFIG_INPUT_FIRST;
 565	else
 566		config |= SPI_CONFIG_INPUT_FIRST;
 567
 568	/*
  569	 * HS_MODE improves signal stability at high SPI clock rates,
  570	 * but is invalid in loopback mode.
 571	 */
 572	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
 573		config |= SPI_CONFIG_HS_MODE;
 574	else
 575		config &= ~SPI_CONFIG_HS_MODE;
 576
 577	writel_relaxed(config, controller->base + SPI_CONFIG);
 578
 579	config = readl_relaxed(controller->base + QUP_CONFIG);
 580	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
 581	config |= xfer->bits_per_word - 1;
 582	config |= QUP_CONFIG_SPI_MODE;
 583
 584	if (controller->use_dma) {
 585		if (!xfer->tx_buf)
 586			config |= QUP_CONFIG_NO_OUTPUT;
 587		if (!xfer->rx_buf)
 588			config |= QUP_CONFIG_NO_INPUT;
 589	}
 590
 591	writel_relaxed(config, controller->base + QUP_CONFIG);
 592
 593	/* only write to OPERATIONAL_MASK when register is present */
 594	if (!controller->qup_v1) {
 595		u32 mask = 0;
 596
 597		/*
 598		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
 599		 * status change in BAM mode
 600		 */
 601
 602		if (mode == QUP_IO_M_MODE_BAM)
 603			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
 604
 605		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
 606	}
 607
 608	return 0;
 609}
 610
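/*
 * Handle one transfer: configure the controller, start the transfer via
 * PIO or DMA, move the QUP into the RUN state and wait for completion,
 * then drop back to RESET.
 */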
 611static int spi_qup_transfer_one(struct spi_master *master,
 612			      struct spi_device *spi,
 613			      struct spi_transfer *xfer)
 614{
 615	struct spi_qup *controller = spi_master_get_devdata(master);
 616	unsigned long timeout, flags;
 617	int ret = -EIO;
 618
 619	ret = spi_qup_io_config(spi, xfer);
 620	if (ret)
 621		return ret;
 622
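	/*
	 * Scale the timeout with the transfer: speed_hz / MSEC_PER_SEC is
	 * the number of clock cycles per millisecond, so len * 8 bits
	 * divided by that is the nominal transfer time in ms; allow 100
	 * times that, converted to jiffies.
	 */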
 623	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
 624	timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
 625	timeout = 100 * msecs_to_jiffies(timeout);
 626
 627	reinit_completion(&controller->done);
 628
 629	spin_lock_irqsave(&controller->lock, flags);
 630	controller->xfer     = xfer;
 631	controller->error    = 0;
 632	controller->rx_bytes = 0;
 633	controller->tx_bytes = 0;
 634	spin_unlock_irqrestore(&controller->lock, flags);
 635
 636	if (controller->use_dma)
 637		ret = spi_qup_do_dma(master, xfer);
 638	else
 639		ret = spi_qup_do_pio(master, xfer);
 640
 641	if (ret)
 642		goto exit;
 643
 644	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
  645		dev_warn(controller->dev, "cannot set RUN state\n");
 646		goto exit;
 647	}
 648
 649	if (!wait_for_completion_timeout(&controller->done, timeout))
 650		ret = -ETIMEDOUT;
 651
 652exit:
 653	spi_qup_set_state(controller, QUP_STATE_RESET);
 654	spin_lock_irqsave(&controller->lock, flags);
 655	controller->xfer = NULL;
 656	if (!ret)
 657		ret = controller->error;
 658	spin_unlock_irqrestore(&controller->lock, flags);
 659
 660	if (ret && controller->use_dma)
 661		spi_qup_dma_terminate(master, xfer);
 662
 663	return ret;
 664}
 665
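/*
 * Use DMA only when the respective channel is available, the buffer is
 * cache-line aligned, the length is a multiple of the block size, and the
 * transfer would not fit in FIFO mode anyway.
 */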
 666static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
 667			    struct spi_transfer *xfer)
 668{
 669	struct spi_qup *qup = spi_master_get_devdata(master);
 670	size_t dma_align = dma_get_cache_alignment();
 671	u32 mode;
 672
 673	qup->use_dma = 0;
 674
 675	if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
 676	    IS_ERR_OR_NULL(master->dma_rx) ||
 677	    !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
 678		return false;
 679
 680	if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
 681	    IS_ERR_OR_NULL(master->dma_tx) ||
 682	    !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
 683		return false;
 684
 685	mode = spi_qup_get_mode(master, xfer);
 686	if (mode == QUP_IO_M_MODE_FIFO)
 687		return false;
 688
 689	qup->use_dma = 1;
 690
 691	return true;
 692}
 693
 694static void spi_qup_release_dma(struct spi_master *master)
 695{
 696	if (!IS_ERR_OR_NULL(master->dma_rx))
 697		dma_release_channel(master->dma_rx);
 698	if (!IS_ERR_OR_NULL(master->dma_tx))
 699		dma_release_channel(master->dma_tx);
 700}
 701
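/*
 * Request the "rx" and "tx" DMA channels and configure them as
 * flow-controlled slaves bursting directly from/to the QUP input and
 * output FIFO registers.
 */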
 702static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
 703{
 704	struct spi_qup *spi = spi_master_get_devdata(master);
 705	struct dma_slave_config *rx_conf = &spi->rx_conf,
 706				*tx_conf = &spi->tx_conf;
 707	struct device *dev = spi->dev;
 708	int ret;
 709
 710	/* allocate dma resources, if available */
 711	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
 712	if (IS_ERR(master->dma_rx))
 713		return PTR_ERR(master->dma_rx);
 714
 715	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
 716	if (IS_ERR(master->dma_tx)) {
 717		ret = PTR_ERR(master->dma_tx);
 718		goto err_tx;
 719	}
 720
 721	/* set DMA parameters */
 722	rx_conf->direction = DMA_DEV_TO_MEM;
 723	rx_conf->device_fc = 1;
 724	rx_conf->src_addr = base + QUP_INPUT_FIFO;
 725	rx_conf->src_maxburst = spi->in_blk_sz;
 726
 727	tx_conf->direction = DMA_MEM_TO_DEV;
 728	tx_conf->device_fc = 1;
 729	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
 730	tx_conf->dst_maxburst = spi->out_blk_sz;
 731
 732	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
 733	if (ret) {
 734		dev_err(dev, "failed to configure RX channel\n");
 735		goto err;
 736	}
 737
 738	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
 739	if (ret) {
 740		dev_err(dev, "failed to configure TX channel\n");
 741		goto err;
 742	}
 743
 744	return 0;
 745
 746err:
 747	dma_release_channel(master->dma_tx);
 748err_tx:
 749	dma_release_channel(master->dma_rx);
 750	return ret;
 751}
 752
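/*
 * Probe: map the QUP registers, acquire the core/iface clocks and the IRQ,
 * parse the optional DT properties, set up DMA channels when available,
 * reset the block and register the SPI master.
 */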
 753static int spi_qup_probe(struct platform_device *pdev)
 754{
 755	struct spi_master *master;
 756	struct clk *iclk, *cclk;
 757	struct spi_qup *controller;
 758	struct resource *res;
 759	struct device *dev;
 760	void __iomem *base;
 761	u32 max_freq, iomode, num_cs;
 762	int ret, irq, size;
 763
 764	dev = &pdev->dev;
 765	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 766	base = devm_ioremap_resource(dev, res);
 767	if (IS_ERR(base))
 768		return PTR_ERR(base);
 769
 770	irq = platform_get_irq(pdev, 0);
 771	if (irq < 0)
 772		return irq;
 773
 774	cclk = devm_clk_get(dev, "core");
 775	if (IS_ERR(cclk))
 776		return PTR_ERR(cclk);
 777
 778	iclk = devm_clk_get(dev, "iface");
 779	if (IS_ERR(iclk))
 780		return PTR_ERR(iclk);
 781
  782	/* This is an optional parameter */
 783	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
 784		max_freq = SPI_MAX_RATE;
 785
 786	if (!max_freq || max_freq > SPI_MAX_RATE) {
 787		dev_err(dev, "invalid clock frequency %d\n", max_freq);
 788		return -ENXIO;
 789	}
 790
 791	ret = clk_prepare_enable(cclk);
 792	if (ret) {
 793		dev_err(dev, "cannot enable core clock\n");
 794		return ret;
 795	}
 796
 797	ret = clk_prepare_enable(iclk);
 798	if (ret) {
 799		clk_disable_unprepare(cclk);
 800		dev_err(dev, "cannot enable iface clock\n");
 801		return ret;
 802	}
 803
 804	master = spi_alloc_master(dev, sizeof(struct spi_qup));
 805	if (!master) {
 806		clk_disable_unprepare(cclk);
 807		clk_disable_unprepare(iclk);
 808		dev_err(dev, "cannot allocate master\n");
 809		return -ENOMEM;
 810	}
 811
  812	/* use the num-cs property unless it is absent or out of range */
 813	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
 814	    num_cs > SPI_NUM_CHIPSELECTS)
 815		master->num_chipselect = SPI_NUM_CHIPSELECTS;
 816	else
 817		master->num_chipselect = num_cs;
 818
 819	master->bus_num = pdev->id;
 820	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
 821	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
 822	master->max_speed_hz = max_freq;
 823	master->transfer_one = spi_qup_transfer_one;
 824	master->dev.of_node = pdev->dev.of_node;
 825	master->auto_runtime_pm = true;
 826	master->dma_alignment = dma_get_cache_alignment();
 827	master->max_dma_len = SPI_MAX_DMA_XFER;
 828
 829	platform_set_drvdata(pdev, master);
 830
 831	controller = spi_master_get_devdata(master);
 832
 833	controller->dev = dev;
 834	controller->base = base;
 835	controller->iclk = iclk;
 836	controller->cclk = cclk;
 837	controller->irq = irq;
 838
 839	ret = spi_qup_init_dma(master, res->start);
 840	if (ret == -EPROBE_DEFER)
 841		goto error;
 842	else if (!ret)
 843		master->can_dma = spi_qup_can_dma;
 844
 845	/* set v1 flag if device is version 1 */
 846	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
 847		controller->qup_v1 = 1;
 848
 849	spin_lock_init(&controller->lock);
 850	init_completion(&controller->done);
 851
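	/*
	 * QUP_IO_M_MODES advertises the FIFO geometry: block sizes are
	 * encoded in 16-byte units (0 means 4 bytes) and each FIFO holds
	 * block_size * 2^(size + 1) bytes.
	 */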
 852	iomode = readl_relaxed(base + QUP_IO_M_MODES);
 853
 854	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
 855	if (size)
 856		controller->out_blk_sz = size * 16;
 857	else
 858		controller->out_blk_sz = 4;
 859
 860	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
 861	if (size)
 862		controller->in_blk_sz = size * 16;
 863	else
 864		controller->in_blk_sz = 4;
 865
 866	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
 867	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
 868
 869	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
 870	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
 871
 872	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
 873		 controller->in_blk_sz, controller->in_fifo_sz,
 874		 controller->out_blk_sz, controller->out_fifo_sz);
 875
 876	writel_relaxed(1, base + QUP_SW_RESET);
 877
 878	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
 879	if (ret) {
 880		dev_err(dev, "cannot set RESET state\n");
 881		goto error_dma;
 882	}
 883
 884	writel_relaxed(0, base + QUP_OPERATIONAL);
 885	writel_relaxed(0, base + QUP_IO_M_MODES);
 886
 887	if (!controller->qup_v1)
 888		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
 889
 890	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
 891		       base + SPI_ERROR_FLAGS_EN);
 892
  893	/* on earlier QUP versions, leave INPUT_OVER_RUN reporting disabled */
 894	if (controller->qup_v1)
 895		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
 896			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
 897			base + QUP_ERROR_FLAGS_EN);
 898
 899	writel_relaxed(0, base + SPI_CONFIG);
 900	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
 901
 902	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
 903			       IRQF_TRIGGER_HIGH, pdev->name, controller);
 904	if (ret)
 905		goto error_dma;
 906
 907	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
 908	pm_runtime_use_autosuspend(dev);
 909	pm_runtime_set_active(dev);
 910	pm_runtime_enable(dev);
 911
 912	ret = devm_spi_register_master(dev, master);
 913	if (ret)
 914		goto disable_pm;
 915
 916	return 0;
 917
 918disable_pm:
 919	pm_runtime_disable(&pdev->dev);
 920error_dma:
 921	spi_qup_release_dma(master);
 922error:
 923	clk_disable_unprepare(cclk);
 924	clk_disable_unprepare(iclk);
 925	spi_master_put(master);
 926	return ret;
 927}
 928
 929#ifdef CONFIG_PM
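/*
 * Runtime PM does not gate the clocks in software; it lets the QUP
 * auto-gate its own clocks while idle via QUP_CONFIG_CLOCK_AUTO_GATE.
 */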
 930static int spi_qup_pm_suspend_runtime(struct device *device)
 931{
 932	struct spi_master *master = dev_get_drvdata(device);
 933	struct spi_qup *controller = spi_master_get_devdata(master);
 934	u32 config;
 935
  936	/* Enable clock auto-gating */
 937	config = readl(controller->base + QUP_CONFIG);
 938	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
 939	writel_relaxed(config, controller->base + QUP_CONFIG);
 940	return 0;
 941}
 942
 943static int spi_qup_pm_resume_runtime(struct device *device)
 944{
 945	struct spi_master *master = dev_get_drvdata(device);
 946	struct spi_qup *controller = spi_master_get_devdata(master);
 947	u32 config;
 948
  949	/* Disable clock auto-gating */
 950	config = readl_relaxed(controller->base + QUP_CONFIG);
 951	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
 952	writel_relaxed(config, controller->base + QUP_CONFIG);
 953	return 0;
 954}
 955#endif /* CONFIG_PM */
 956
 957#ifdef CONFIG_PM_SLEEP
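/*
 * System sleep: quiesce the SPI core first, park the QUP in RESET and only
 * then release the clocks; resume re-enables the clocks before touching
 * the controller again.
 */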
 958static int spi_qup_suspend(struct device *device)
 959{
 960	struct spi_master *master = dev_get_drvdata(device);
 961	struct spi_qup *controller = spi_master_get_devdata(master);
 962	int ret;
 963
 964	ret = spi_master_suspend(master);
 965	if (ret)
 966		return ret;
 967
 968	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
 969	if (ret)
 970		return ret;
 971
 972	clk_disable_unprepare(controller->cclk);
 973	clk_disable_unprepare(controller->iclk);
 974	return 0;
 975}
 976
 977static int spi_qup_resume(struct device *device)
 978{
 979	struct spi_master *master = dev_get_drvdata(device);
 980	struct spi_qup *controller = spi_master_get_devdata(master);
 981	int ret;
 982
 983	ret = clk_prepare_enable(controller->iclk);
 984	if (ret)
 985		return ret;
 986
 987	ret = clk_prepare_enable(controller->cclk);
 988	if (ret)
 989		return ret;
 990
 991	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
 992	if (ret)
 993		return ret;
 994
 995	return spi_master_resume(master);
 996}
 997#endif /* CONFIG_PM_SLEEP */
 998
 999static int spi_qup_remove(struct platform_device *pdev)
1000{
1001	struct spi_master *master = dev_get_drvdata(&pdev->dev);
1002	struct spi_qup *controller = spi_master_get_devdata(master);
1003	int ret;
1004
1005	ret = pm_runtime_get_sync(&pdev->dev);
1006	if (ret < 0)
1007		return ret;
1008
1009	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1010	if (ret)
1011		return ret;
1012
1013	spi_qup_release_dma(master);
1014
1015	clk_disable_unprepare(controller->cclk);
1016	clk_disable_unprepare(controller->iclk);
1017
1018	pm_runtime_put_noidle(&pdev->dev);
1019	pm_runtime_disable(&pdev->dev);
1020	return 0;
1021}
1022
1023static const struct of_device_id spi_qup_dt_match[] = {
1024	{ .compatible = "qcom,spi-qup-v1.1.1", },
1025	{ .compatible = "qcom,spi-qup-v2.1.1", },
1026	{ .compatible = "qcom,spi-qup-v2.2.1", },
1027	{ }
1028};
1029MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
1030
1031static const struct dev_pm_ops spi_qup_dev_pm_ops = {
1032	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
1033	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
1034			   spi_qup_pm_resume_runtime,
1035			   NULL)
1036};
1037
1038static struct platform_driver spi_qup_driver = {
1039	.driver = {
1040		.name		= "spi_qup",
1041		.pm		= &spi_qup_dev_pm_ops,
1042		.of_match_table = spi_qup_dt_match,
1043	},
1044	.probe = spi_qup_probe,
1045	.remove = spi_qup_remove,
1046};
1047module_platform_driver(spi_qup_driver);
1048
1049MODULE_LICENSE("GPL v2");
1050MODULE_ALIAS("platform:spi_qup");