// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include "internals.h"

#define QUP_CONFIG		0x0000
#define QUP_STATE		0x0004
#define QUP_IO_M_MODES		0x0008
#define QUP_SW_RESET		0x000c
#define QUP_OPERATIONAL		0x0018
#define QUP_ERROR_FLAGS		0x001c
#define QUP_ERROR_FLAGS_EN	0x0020
#define QUP_OPERATIONAL_MASK	0x0028
#define QUP_HW_VERSION		0x0030
#define QUP_MX_OUTPUT_CNT	0x0100
#define QUP_OUTPUT_FIFO		0x0110
#define QUP_MX_WRITE_CNT	0x0150
#define QUP_MX_INPUT_CNT	0x0200
#define QUP_MX_READ_CNT		0x0208
#define QUP_INPUT_FIFO		0x0218

#define SPI_CONFIG		0x0300
#define SPI_IO_CONTROL		0x0304
#define SPI_ERROR_FLAGS		0x0308
#define SPI_ERROR_FLAGS_EN	0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		(1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID		BIT(2)
#define QUP_STATE_RESET		0
#define QUP_STATE_RUN		1
#define QUP_STATE_PAUSE		3
#define QUP_STATE_MASK		3
#define QUP_STATE_CLEAR		2

#define QUP_HW_VERSION_2_1_1	0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN	BIT(15)
#define QUP_IO_M_UNPACK_EN	BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO	0
#define QUP_IO_M_MODE_BLOCK	1
#define QUP_IO_M_MODE_DMOV	2
#define QUP_IO_M_MODE_BAM	3

/* QUP_OPERATIONAL fields */
#define QUP_OP_IN_BLOCK_READ_REQ	BIT(13)
#define QUP_OP_OUT_BLOCK_WRITE_REQ	BIT(12)
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE	BIT(10)
#define SPI_CONFIG_INPUT_FIRST	BIT(9)
#define SPI_CONFIG_LOOPBACK	BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS	BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH	BIT(10)
#define SPI_IO_C_MX_CS_MODE	BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)	(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK	0x000c
#define SPI_IO_C_TRISTATE_CS	BIT(1)
#define SPI_IO_C_NO_TRI_STATE	BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN	BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN	BIT(0)

#define SPI_NUM_CHIPSELECTS	4

#define SPI_MAX_XFER		(SZ_64K - 64)

/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE		26000000
#define SPI_MAX_RATE		50000000

#define SPI_DELAY_THRESHOLD	1
#define SPI_DELAY_RETRY		10

#define SPI_BUS_WIDTH		8

struct spi_qup {
	void __iomem	*base;
	struct device	*dev;
	struct clk	*cclk;	/* core clock */
	struct clk	*iclk;	/* interface clock */
	struct icc_path	*icc_path; /* interconnect to RAM */
	int		irq;
	spinlock_t	lock;

	int		in_fifo_sz;
	int		out_fifo_sz;
	int		in_blk_sz;
	int		out_blk_sz;

	struct spi_transfer	*xfer;
	struct completion	done;
	int			error;
	int			w_size;	/* bytes per SPI word */
	int			n_words;
	int			tx_bytes;
	int			rx_bytes;
	const u8		*tx_buf;
	u8			*rx_buf;
	int			qup_v1;

	int			mode;
	struct dma_slave_config	rx_conf;
	struct dma_slave_config	tx_conf;

	u32			bw_speed_hz;
};

static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);

static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
{
	u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);

	return (opflag & flag) != 0;
}

static inline bool spi_qup_is_dma_xfer(int mode)
{
	if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
		return true;

	return false;
}

/* gets the transaction length in bytes */
static inline unsigned int spi_qup_len(struct spi_qup *controller)
{
	return controller->n_words * controller->w_size;
}

static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}

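/*
 * Vote for interconnect bandwidth to RAM so DMA can keep up with the bus;
 * SPI_BUS_WIDTH bytes per bus clock appears to serve as a generous peak
 * estimate. Calling this with a speed_hz of zero drops the vote again.
 */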
static int spi_qup_vote_bw(struct spi_qup *controller, u32 speed_hz)
{
	u32 needed_peak_bw;
	int ret;

	if (controller->bw_speed_hz == speed_hz)
		return 0;

	needed_peak_bw = Bps_to_icc(speed_hz * SPI_BUS_WIDTH);
	ret = icc_set_bw(controller->icc_path, 0, needed_peak_bw);
	if (ret)
		return ret;

	controller->bw_speed_hz = speed_hz;
	return 0;
}

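/*
 * Move the QUP state machine to @state, waiting for the hardware to
 * report STATE_VALID both before the write and after it takes effect.
 */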
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
			loop, state);

	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}

static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
{
	u8 *rx_buf = controller->rx_buf;
	int i, shift, num_bytes;
	u32 word;

	for (; num_words; num_words--) {

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->rx_bytes,
				       controller->w_size);

		if (!rx_buf) {
			controller->rx_bytes += num_bytes;
			continue;
		}

		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 * 4 bytes: 0x12345678
			 * 2 bytes: 0x00001234
			 * 1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (controller->w_size - i - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}

static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
{
	u32 remainder, words_per_block, num_words;
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
				 controller->w_size);
	words_per_block = controller->in_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (!remainder)
			goto exit;

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
					words_per_block : remainder;
		} else {
			if (!spi_qup_is_flag_set(controller,
						 QUP_OP_IN_FIFO_NOT_EMPTY))
				break;

			num_words = 1;
		}

		/* read up to the maximum transfer size available */
		spi_qup_read_from_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
							  QUP_OP_IN_BLOCK_READ_REQ))
			break;

	} while (remainder);

	/*
	 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
	 * reads, it has to be cleared again at the very end. However, be sure
	 * to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
	 * present and this is used to determine if transaction is complete
	 */
exit:
	if (!remainder) {
		*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
			writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
				       controller->base + QUP_OPERATIONAL);
	}
}

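/*
 * Pack up to w_size bytes per FIFO word, most significant byte first,
 * mirroring the unpacking done in spi_qup_read_from_fifo().
 */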
static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
{
	const u8 *tx_buf = controller->tx_buf;
	int i, num_bytes;
	u32 word, data;

	for (; num_words; num_words--) {
		word = 0;

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->tx_bytes,
				       controller->w_size);
		if (tx_buf)
			for (i = 0; i < num_bytes; i++) {
				data = tx_buf[controller->tx_bytes + i];
				word |= data << (BITS_PER_BYTE * (3 - i));
			}

		controller->tx_bytes += num_bytes;

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}

static void spi_qup_dma_done(void *data)
{
	struct spi_qup *qup = data;

	complete(&qup->done);
}

static void spi_qup_write(struct spi_qup *controller)
{
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
	u32 remainder, words_per_block, num_words;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
				 controller->w_size);
	words_per_block = controller->out_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		/* make sure the interrupt is valid */
		if (!remainder)
			return;

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
				words_per_block : remainder;
		} else {
			if (spi_qup_is_flag_set(controller,
						QUP_OP_OUT_FIFO_FULL))
				break;

			num_words = 1;
		}

		spi_qup_write_to_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
							  QUP_OP_OUT_BLOCK_WRITE_REQ))
			break;

	} while (remainder);
}

static int spi_qup_prep_sg(struct spi_controller *host, struct scatterlist *sgl,
			   unsigned int nents, enum dma_transfer_direction dir,
			   dma_async_tx_callback callback)
{
	struct spi_qup *qup = spi_controller_get_devdata(host);
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	if (dir == DMA_MEM_TO_DEV)
		chan = host->dma_tx;
	else
		chan = host->dma_rx;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -EINVAL;

	desc->callback = callback;
	desc->callback_param = qup;

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

static void spi_qup_dma_terminate(struct spi_controller *host,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(host->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(host->dma_rx);
}

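/*
 * Walk @sgl and count how many entries fit within @max bytes, guarding
 * against u32 overflow; the entry count is returned through @nents and
 * the accumulated byte length is the return value.
 */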
static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
				     u32 *nents)
{
	struct scatterlist *sg;
	u32 total = 0;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		unsigned int len = sg_dma_len(sg);

		/* check for overflow as well as limit */
		if (((total + len) < total) || ((total + len) > max))
			break;

		total += len;
		(*nents)++;
	}

	return total;
}

static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	struct spi_controller *host = spi->controller;
	struct spi_qup *qup = spi_controller_get_devdata(host);
	struct scatterlist *tx_sgl, *rx_sgl;
	int ret;

	ret = spi_qup_vote_bw(qup, xfer->speed_hz);
	if (ret) {
		dev_err(qup->dev, "fail to vote for ICC bandwidth: %d\n", ret);
		return -EIO;
	}

	if (xfer->rx_buf)
		rx_done = spi_qup_dma_done;
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	rx_sgl = xfer->rx_sg.sgl;
	tx_sgl = xfer->tx_sg.sgl;

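	/*
	 * Issue the transfer in SPI_MAX_XFER sized chunks, reconfiguring
	 * the QUP and reissuing descriptors for each chunk until both
	 * scatterlists are exhausted.
	 */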
	do {
		u32 rx_nents = 0, tx_nents = 0;

		if (rx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
					SPI_MAX_XFER, &rx_nents) / qup->w_size;
		if (tx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
					SPI_MAX_XFER, &tx_nents) / qup->w_size;
		if (!qup->n_words)
			return -EIO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		/* before issuing the descriptors, set the QUP to run */
		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}
		if (rx_sgl) {
			ret = spi_qup_prep_sg(host, rx_sgl, rx_nents,
					      DMA_DEV_TO_MEM, rx_done);
			if (ret)
				return ret;
			dma_async_issue_pending(host->dma_rx);
		}

		if (tx_sgl) {
			ret = spi_qup_prep_sg(host, tx_sgl, tx_nents,
					      DMA_MEM_TO_DEV, tx_done);
			if (ret)
				return ret;

			dma_async_issue_pending(host->dma_tx);
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
			;
		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
			;

	} while (rx_sgl || tx_sgl);

	return 0;
}

static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	struct spi_controller *host = spi->controller;
	struct spi_qup *qup = spi_controller_get_devdata(host);
	int ret, n_words, iterations, offset = 0;

	n_words = qup->n_words;
	iterations = n_words / SPI_MAX_XFER; /* round down */
	qup->rx_buf = xfer->rx_buf;
	qup->tx_buf = xfer->tx_buf;

	do {
		if (iterations)
			qup->n_words = SPI_MAX_XFER;
		else
			qup->n_words = n_words % SPI_MAX_XFER;

		if (qup->tx_buf && offset)
			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;

		if (qup->rx_buf && offset)
			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;

		/*
		 * if the transaction is small enough, we need to
		 * fall back to FIFO mode
		 */
		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
			qup->mode = QUP_IO_M_MODE_FIFO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
		if (ret) {
			dev_warn(qup->dev, "cannot set PAUSE state\n");
			return ret;
		}

		if (qup->mode == QUP_IO_M_MODE_FIFO)
			spi_qup_write(qup);

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		offset++;
	} while (iterations--);

	return 0;
}

static bool spi_qup_data_pending(struct spi_qup *controller)
{
	unsigned int remainder_tx, remainder_rx;

	remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
				    controller->tx_bytes, controller->w_size);

	remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
				    controller->rx_bytes, controller->w_size);

	return remainder_tx || remainder_rx;
}

static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	u32 opflags, qup_err, spi_err;
	int error = 0;

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	spin_lock(&controller->lock);
	if (!controller->error)
		controller->error = error;
	spin_unlock(&controller->lock);

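	/*
	 * For DMA transfers the dmaengine callback signals completion, so
	 * only ack the operational flags here; for PIO, service the FIFOs
	 * and complete once no data is pending.
	 */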
	if (spi_qup_is_dma_xfer(controller->mode)) {
		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
	} else {
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_read(controller, &opflags);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_write(controller);

		if (!spi_qup_data_pending(controller))
			complete(&controller->done);
	}

	if (error)
		complete(&controller->done);

	if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
		if (!spi_qup_is_dma_xfer(controller->mode)) {
			if (spi_qup_data_pending(controller))
				return IRQ_HANDLED;
		}
		complete(&controller->done);
	}

	return IRQ_HANDLED;
}

/* set the clock frequency and bits per word, and determine the transfer mode */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
	int ret;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "too big size for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = dev_pm_opp_set_rate(controller->dev, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "fail to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}

	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
	controller->n_words = xfer->len / controller->w_size;

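	/*
	 * Choose FIFO mode for transfers that fit the FIFO, BAM (DMA) when
	 * the buffers are already DMA mapped, and interrupt-driven block
	 * mode otherwise.
	 */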
	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
		controller->mode = QUP_IO_M_MODE_FIFO;
	else if (spi_xfer_is_dma_mapped(spi->controller, spi, xfer))
		controller->mode = QUP_IO_M_MODE_BAM;
	else
		controller->mode = QUP_IO_M_MODE_BLOCK;

	return 0;
}

/* prep qup for another spi transaction of specific type */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
	u32 config, iomode, control;
	unsigned long flags;

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	switch (controller->mode) {
	case QUP_IO_M_MODE_FIFO:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_READ_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		break;
	case QUP_IO_M_MODE_BAM:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * for DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
			 * That case is a non-balanced transfer when there is
			 * only a rx_buf.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(controller->n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
		break;
	case QUP_IO_M_MODE_BLOCK:
		reinit_completion(&controller->done);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
		break;
	default:
		dev_err(controller->dev, "unknown mode = %d\n",
			controller->mode);
		return -EIO;
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	if (!spi_qup_is_dma_xfer(controller->mode))
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability for high SPI clock rates, but
	 * is invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	if (spi_qup_is_dma_xfer(controller->mode)) {
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when register is present */
	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
		 * status change in BAM mode
		 */

		if (spi_qup_is_dma_xfer(controller->mode))
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}

static int spi_qup_transfer_one(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(host);
	unsigned long timeout, flags;
	int ret;

	ret = spi_qup_io_prep(spi, xfer);
	if (ret)
		return ret;

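	/*
	 * speed_hz / MSEC_PER_SEC is the bit rate per millisecond; dividing
	 * the chunk length in bits by that gives the nominal time on the
	 * wire, and a 100x margin is added on top.
	 */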
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
				     xfer->len) * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_is_dma_xfer(controller->mode))
		ret = spi_qup_do_dma(spi, xfer, timeout);
	else
		ret = spi_qup_do_pio(spi, xfer, timeout);

	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && spi_qup_is_dma_xfer(controller->mode))
		spi_qup_dma_terminate(host, xfer);

	return ret;
}

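/*
 * DMA is only worthwhile for transfers that do not fit in the FIFO; it
 * also requires cache-line aligned buffers and, on v1 hardware, lengths
 * that are a multiple of the block size.
 */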
static bool spi_qup_can_dma(struct spi_controller *host, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_controller_get_devdata(host);
	size_t dma_align = dma_get_cache_alignment();
	int n_words;

	if (xfer->rx_buf) {
		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
		    IS_ERR_OR_NULL(host->dma_rx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
			return false;
	}

	if (xfer->tx_buf) {
		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
		    IS_ERR_OR_NULL(host->dma_tx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
			return false;
	}

	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
		return false;

	return true;
}

static void spi_qup_release_dma(struct spi_controller *host)
{
	if (!IS_ERR_OR_NULL(host->dma_rx))
		dma_release_channel(host->dma_rx);
	if (!IS_ERR_OR_NULL(host->dma_tx))
		dma_release_channel(host->dma_tx);
}

static int spi_qup_init_dma(struct spi_controller *host, resource_size_t base)
{
	struct spi_qup *spi = spi_controller_get_devdata(host);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	host->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(host->dma_rx))
		return PTR_ERR(host->dma_rx);

	host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(host->dma_tx)) {
		ret = PTR_ERR(host->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(host->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(host->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(host->dma_tx);
err_tx:
	dma_release_channel(host->dma_rx);
	return ret;
}

static void spi_qup_set_cs(struct spi_device *spi, bool val)
{
	struct spi_qup *controller;
	u32 spi_ioc;
	u32 spi_ioc_orig;

	controller = spi_controller_get_devdata(spi->controller);
	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (!val)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
}

static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct icc_path *icc_path;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	icc_path = devm_of_icc_get(dev, NULL);
	if (IS_ERR(icc_path))
		return dev_err_probe(dev, PTR_ERR(icc_path),
				     "failed to get interconnect path\n");

	/* This is an optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(dev, ret, "invalid OPP table\n");

	host = spi_alloc_host(dev, sizeof(struct spi_qup));
	if (!host) {
		dev_err(dev, "cannot allocate host\n");
		return -ENOMEM;
	}

	/* use num-cs, unless it is absent or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		host->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		host->num_chipselect = num_cs;

	host->use_gpio_descriptors = true;
	host->max_native_cs = SPI_NUM_CHIPSELECTS;
	host->bus_num = pdev->id;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	host->max_speed_hz = max_freq;
	host->transfer_one = spi_qup_transfer_one;
	host->dev.of_node = pdev->dev.of_node;
	host->auto_runtime_pm = true;
	host->dma_alignment = dma_get_cache_alignment();
	host->max_dma_len = SPI_MAX_XFER;

	platform_set_drvdata(pdev, host);

	controller = spi_controller_get_devdata(host);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->icc_path = icc_path;
	controller->irq = irq;

	ret = spi_qup_init_dma(host, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		host->can_dma = spi_qup_can_dma;

	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);

	if (!controller->qup_v1)
		host->set_cs = spi_qup_set_cs;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		goto error_dma;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		goto error_dma;
	}

	iomode = readl_relaxed(base + QUP_IO_M_MODES);

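	/*
	 * The hardware reports its geometry in QUP_IO_M_MODES: a nonzero
	 * block-size field n means n * 16 bytes (4 bytes when zero), and
	 * the FIFO holds block size * (2 << fifo-size field) bytes.
	 */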
	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_clk;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* if earlier version of the QUP, disable INPUT_OVERRUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			       QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			       base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_clk;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_controller(dev, host);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_clk:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
error_dma:
	spi_qup_release_dma(host);
error:
	spi_controller_put(host);
	return ret;
}

#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	u32 config;

	/* Enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	clk_disable_unprepare(controller->cclk);
	spi_qup_vote_bw(controller, 0);
	clk_disable_unprepare(controller->iclk);

	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	u32 config;
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret) {
		clk_disable_unprepare(controller->iclk);
		return ret;
	}

	/* Disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	int ret;

	if (pm_runtime_suspended(device)) {
		ret = spi_qup_pm_resume_runtime(device);
		if (ret)
			return ret;
	}
	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	spi_qup_vote_bw(controller, 0);
	clk_disable_unprepare(controller->iclk);
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret) {
		clk_disable_unprepare(controller->iclk);
		return ret;
	}

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		goto disable_clk;

	ret = spi_controller_resume(host);
	if (ret)
		goto disable_clk;

	return 0;

disable_clk:
	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static void spi_qup_remove(struct platform_device *pdev)
{
	struct spi_controller *host = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);

	if (ret >= 0) {
		ret = spi_qup_set_state(controller, QUP_STATE_RESET);
		if (ret)
			dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
				 ERR_PTR(ret));

		clk_disable_unprepare(controller->cclk);
		clk_disable_unprepare(controller->iclk);
	} else {
		dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
			 ERR_PTR(ret));
	}

	spi_qup_release_dma(host);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_DESCRIPTION("Qualcomm SPI controller with QUP interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");
1/*
2 * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License rev 2 and
6 * only rev 2 as published by the free Software foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or fITNESS fOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/err.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/list.h>
20#include <linux/module.h>
21#include <linux/of.h>
22#include <linux/of_device.h>
23#include <linux/platform_device.h>
24#include <linux/pm_runtime.h>
25#include <linux/spi/spi.h>
26#include <linux/dmaengine.h>
27#include <linux/dma-mapping.h>
28
29#define QUP_CONFIG 0x0000
30#define QUP_STATE 0x0004
31#define QUP_IO_M_MODES 0x0008
32#define QUP_SW_RESET 0x000c
33#define QUP_OPERATIONAL 0x0018
34#define QUP_ERROR_FLAGS 0x001c
35#define QUP_ERROR_FLAGS_EN 0x0020
36#define QUP_OPERATIONAL_MASK 0x0028
37#define QUP_HW_VERSION 0x0030
38#define QUP_MX_OUTPUT_CNT 0x0100
39#define QUP_OUTPUT_FIFO 0x0110
40#define QUP_MX_WRITE_CNT 0x0150
41#define QUP_MX_INPUT_CNT 0x0200
42#define QUP_MX_READ_CNT 0x0208
43#define QUP_INPUT_FIFO 0x0218
44
45#define SPI_CONFIG 0x0300
46#define SPI_IO_CONTROL 0x0304
47#define SPI_ERROR_FLAGS 0x0308
48#define SPI_ERROR_FLAGS_EN 0x030c
49
50/* QUP_CONFIG fields */
51#define QUP_CONFIG_SPI_MODE (1 << 8)
52#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
53#define QUP_CONFIG_NO_INPUT BIT(7)
54#define QUP_CONFIG_NO_OUTPUT BIT(6)
55#define QUP_CONFIG_N 0x001f
56
57/* QUP_STATE fields */
58#define QUP_STATE_VALID BIT(2)
59#define QUP_STATE_RESET 0
60#define QUP_STATE_RUN 1
61#define QUP_STATE_PAUSE 3
62#define QUP_STATE_MASK 3
63#define QUP_STATE_CLEAR 2
64
65#define QUP_HW_VERSION_2_1_1 0x20010001
66
67/* QUP_IO_M_MODES fields */
68#define QUP_IO_M_PACK_EN BIT(15)
69#define QUP_IO_M_UNPACK_EN BIT(14)
70#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
71#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
72#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
73#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
74
75#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
76#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
77#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
78#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
79
80#define QUP_IO_M_MODE_FIFO 0
81#define QUP_IO_M_MODE_BLOCK 1
82#define QUP_IO_M_MODE_DMOV 2
83#define QUP_IO_M_MODE_BAM 3
84
85/* QUP_OPERATIONAL fields */
86#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
87#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
88#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
89#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
90#define QUP_OP_IN_SERVICE_FLAG BIT(9)
91#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
92#define QUP_OP_IN_FIFO_FULL BIT(7)
93#define QUP_OP_OUT_FIFO_FULL BIT(6)
94#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
95#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
96
97/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
98#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
99#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
100#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
101#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
102
103/* SPI_CONFIG fields */
104#define SPI_CONFIG_HS_MODE BIT(10)
105#define SPI_CONFIG_INPUT_FIRST BIT(9)
106#define SPI_CONFIG_LOOPBACK BIT(8)
107
108/* SPI_IO_CONTROL fields */
109#define SPI_IO_C_FORCE_CS BIT(11)
110#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
111#define SPI_IO_C_MX_CS_MODE BIT(8)
112#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
113#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
114#define SPI_IO_C_CS_SELECT_MASK 0x000c
115#define SPI_IO_C_TRISTATE_CS BIT(1)
116#define SPI_IO_C_NO_TRI_STATE BIT(0)
117
118/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
119#define SPI_ERROR_CLK_OVER_RUN BIT(1)
120#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
121
122#define SPI_NUM_CHIPSELECTS 4
123
124#define SPI_MAX_XFER (SZ_64K - 64)
125
126/* high speed mode is when bus rate is greater then 26MHz */
127#define SPI_HS_MIN_RATE 26000000
128#define SPI_MAX_RATE 50000000
129
130#define SPI_DELAY_THRESHOLD 1
131#define SPI_DELAY_RETRY 10
132
133struct spi_qup {
134 void __iomem *base;
135 struct device *dev;
136 struct clk *cclk; /* core clock */
137 struct clk *iclk; /* interface clock */
138 int irq;
139 spinlock_t lock;
140
141 int in_fifo_sz;
142 int out_fifo_sz;
143 int in_blk_sz;
144 int out_blk_sz;
145
146 struct spi_transfer *xfer;
147 struct completion done;
148 int error;
149 int w_size; /* bytes per SPI word */
150 int n_words;
151 int tx_bytes;
152 int rx_bytes;
153 const u8 *tx_buf;
154 u8 *rx_buf;
155 int qup_v1;
156
157 int mode;
158 struct dma_slave_config rx_conf;
159 struct dma_slave_config tx_conf;
160};
161
162static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
163
164static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
165{
166 u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
167
168 return (opflag & flag) != 0;
169}
170
171static inline bool spi_qup_is_dma_xfer(int mode)
172{
173 if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
174 return true;
175
176 return false;
177}
178
179/* get's the transaction size length */
180static inline unsigned int spi_qup_len(struct spi_qup *controller)
181{
182 return controller->n_words * controller->w_size;
183}
184
185static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
186{
187 u32 opstate = readl_relaxed(controller->base + QUP_STATE);
188
189 return opstate & QUP_STATE_VALID;
190}
191
192static int spi_qup_set_state(struct spi_qup *controller, u32 state)
193{
194 unsigned long loop;
195 u32 cur_state;
196
197 loop = 0;
198 while (!spi_qup_is_valid_state(controller)) {
199
200 usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
201
202 if (++loop > SPI_DELAY_RETRY)
203 return -EIO;
204 }
205
206 if (loop)
207 dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
208 loop, state);
209
210 cur_state = readl_relaxed(controller->base + QUP_STATE);
211 /*
212 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
213 * of (b10) are required
214 */
215 if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
216 (state == QUP_STATE_RESET)) {
217 writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
218 writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
219 } else {
220 cur_state &= ~QUP_STATE_MASK;
221 cur_state |= state;
222 writel_relaxed(cur_state, controller->base + QUP_STATE);
223 }
224
225 loop = 0;
226 while (!spi_qup_is_valid_state(controller)) {
227
228 usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
229
230 if (++loop > SPI_DELAY_RETRY)
231 return -EIO;
232 }
233
234 return 0;
235}
236
237static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
238{
239 u8 *rx_buf = controller->rx_buf;
240 int i, shift, num_bytes;
241 u32 word;
242
243 for (; num_words; num_words--) {
244
245 word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
246
247 num_bytes = min_t(int, spi_qup_len(controller) -
248 controller->rx_bytes,
249 controller->w_size);
250
251 if (!rx_buf) {
252 controller->rx_bytes += num_bytes;
253 continue;
254 }
255
256 for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
257 /*
258 * The data format depends on bytes per SPI word:
259 * 4 bytes: 0x12345678
260 * 2 bytes: 0x00001234
261 * 1 byte : 0x00000012
262 */
263 shift = BITS_PER_BYTE;
264 shift *= (controller->w_size - i - 1);
265 rx_buf[controller->rx_bytes] = word >> shift;
266 }
267 }
268}
269
270static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
271{
272 u32 remainder, words_per_block, num_words;
273 bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
274
275 remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
276 controller->w_size);
277 words_per_block = controller->in_blk_sz >> 2;
278
279 do {
280 /* ACK by clearing service flag */
281 writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
282 controller->base + QUP_OPERATIONAL);
283
284 if (is_block_mode) {
285 num_words = (remainder > words_per_block) ?
286 words_per_block : remainder;
287 } else {
288 if (!spi_qup_is_flag_set(controller,
289 QUP_OP_IN_FIFO_NOT_EMPTY))
290 break;
291
292 num_words = 1;
293 }
294
295 /* read up to the maximum transfer size available */
296 spi_qup_read_from_fifo(controller, num_words);
297
298 remainder -= num_words;
299
300 /* if block mode, check to see if next block is available */
301 if (is_block_mode && !spi_qup_is_flag_set(controller,
302 QUP_OP_IN_BLOCK_READ_REQ))
303 break;
304
305 } while (remainder);
306
307 /*
308 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
309 * reads, it has to be cleared again at the very end. However, be sure
310 * to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
311 * present and this is used to determine if transaction is complete
312 */
313 *opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
314 if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
315 writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
316 controller->base + QUP_OPERATIONAL);
317
318}
319
320static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
321{
322 const u8 *tx_buf = controller->tx_buf;
323 int i, num_bytes;
324 u32 word, data;
325
326 for (; num_words; num_words--) {
327 word = 0;
328
329 num_bytes = min_t(int, spi_qup_len(controller) -
330 controller->tx_bytes,
331 controller->w_size);
332 if (tx_buf)
333 for (i = 0; i < num_bytes; i++) {
334 data = tx_buf[controller->tx_bytes + i];
335 word |= data << (BITS_PER_BYTE * (3 - i));
336 }
337
338 controller->tx_bytes += num_bytes;
339
340 writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
341 }
342}
343
344static void spi_qup_dma_done(void *data)
345{
346 struct spi_qup *qup = data;
347
348 complete(&qup->done);
349}
350
351static void spi_qup_write(struct spi_qup *controller)
352{
353 bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
354 u32 remainder, words_per_block, num_words;
355
356 remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
357 controller->w_size);
358 words_per_block = controller->out_blk_sz >> 2;
359
360 do {
361 /* ACK by clearing service flag */
362 writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
363 controller->base + QUP_OPERATIONAL);
364
365 if (is_block_mode) {
366 num_words = (remainder > words_per_block) ?
367 words_per_block : remainder;
368 } else {
369 if (spi_qup_is_flag_set(controller,
370 QUP_OP_OUT_FIFO_FULL))
371 break;
372
373 num_words = 1;
374 }
375
376 spi_qup_write_to_fifo(controller, num_words);
377
378 remainder -= num_words;
379
380 /* if block mode, check to see if next block is available */
381 if (is_block_mode && !spi_qup_is_flag_set(controller,
382 QUP_OP_OUT_BLOCK_WRITE_REQ))
383 break;
384
385 } while (remainder);
386}
387
388static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
389 unsigned int nents, enum dma_transfer_direction dir,
390 dma_async_tx_callback callback)
391{
392 struct spi_qup *qup = spi_master_get_devdata(master);
393 unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
394 struct dma_async_tx_descriptor *desc;
395 struct dma_chan *chan;
396 dma_cookie_t cookie;
397
398 if (dir == DMA_MEM_TO_DEV)
399 chan = master->dma_tx;
400 else
401 chan = master->dma_rx;
402
403 desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
404 if (IS_ERR_OR_NULL(desc))
405 return desc ? PTR_ERR(desc) : -EINVAL;
406
407 desc->callback = callback;
408 desc->callback_param = qup;
409
410 cookie = dmaengine_submit(desc);
411
412 return dma_submit_error(cookie);
413}
414
415static void spi_qup_dma_terminate(struct spi_master *master,
416 struct spi_transfer *xfer)
417{
418 if (xfer->tx_buf)
419 dmaengine_terminate_all(master->dma_tx);
420 if (xfer->rx_buf)
421 dmaengine_terminate_all(master->dma_rx);
422}
423
424static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
425 u32 *nents)
426{
427 struct scatterlist *sg;
428 u32 total = 0;
429
430 for (sg = sgl; sg; sg = sg_next(sg)) {
431 unsigned int len = sg_dma_len(sg);
432
433 /* check for overflow as well as limit */
434 if (((total + len) < total) || ((total + len) > max))
435 break;
436
437 total += len;
438 (*nents)++;
439 }
440
441 return total;
442}
443
444static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
445 unsigned long timeout)
446{
447 dma_async_tx_callback rx_done = NULL, tx_done = NULL;
448 struct spi_master *master = spi->master;
449 struct spi_qup *qup = spi_master_get_devdata(master);
450 struct scatterlist *tx_sgl, *rx_sgl;
451 int ret;
452
453 if (xfer->rx_buf)
454 rx_done = spi_qup_dma_done;
455 else if (xfer->tx_buf)
456 tx_done = spi_qup_dma_done;
457
458 rx_sgl = xfer->rx_sg.sgl;
459 tx_sgl = xfer->tx_sg.sgl;
460
461 do {
462 u32 rx_nents = 0, tx_nents = 0;
463
464 if (rx_sgl)
465 qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
466 SPI_MAX_XFER, &rx_nents) / qup->w_size;
467 if (tx_sgl)
468 qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
469 SPI_MAX_XFER, &tx_nents) / qup->w_size;
470 if (!qup->n_words)
471 return -EIO;
472
473 ret = spi_qup_io_config(spi, xfer);
474 if (ret)
475 return ret;
476
477 /* before issuing the descriptors, set the QUP to run */
478 ret = spi_qup_set_state(qup, QUP_STATE_RUN);
479 if (ret) {
480 dev_warn(qup->dev, "cannot set RUN state\n");
481 return ret;
482 }
483 if (rx_sgl) {
484 ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
485 DMA_DEV_TO_MEM, rx_done);
486 if (ret)
487 return ret;
488 dma_async_issue_pending(master->dma_rx);
489 }
490
491 if (tx_sgl) {
492 ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
493 DMA_MEM_TO_DEV, tx_done);
494 if (ret)
495 return ret;
496
497 dma_async_issue_pending(master->dma_tx);
498 }
499
500 if (!wait_for_completion_timeout(&qup->done, timeout))
501 return -ETIMEDOUT;
502
503 for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
504 ;
505 for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
506 ;
507
508 } while (rx_sgl || tx_sgl);
509
510 return 0;
511}
512
513static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
514 unsigned long timeout)
515{
516 struct spi_master *master = spi->master;
517 struct spi_qup *qup = spi_master_get_devdata(master);
518 int ret, n_words, iterations, offset = 0;
519
520 n_words = qup->n_words;
521 iterations = n_words / SPI_MAX_XFER; /* round down */
522 qup->rx_buf = xfer->rx_buf;
523 qup->tx_buf = xfer->tx_buf;
524
525 do {
526 if (iterations)
527 qup->n_words = SPI_MAX_XFER;
528 else
529 qup->n_words = n_words % SPI_MAX_XFER;
530
531 if (qup->tx_buf && offset)
532 qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;
533
534 if (qup->rx_buf && offset)
535 qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;
536
537 /*
538 * if the transaction is small enough, we need
539 * to fallback to FIFO mode
540 */
541 if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
542 qup->mode = QUP_IO_M_MODE_FIFO;
543
544 ret = spi_qup_io_config(spi, xfer);
545 if (ret)
546 return ret;
547
548 ret = spi_qup_set_state(qup, QUP_STATE_RUN);
549 if (ret) {
550 dev_warn(qup->dev, "cannot set RUN state\n");
551 return ret;
552 }
553
554 ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
555 if (ret) {
556 dev_warn(qup->dev, "cannot set PAUSE state\n");
557 return ret;
558 }
559
560 if (qup->mode == QUP_IO_M_MODE_FIFO)
561 spi_qup_write(qup);
562
563 ret = spi_qup_set_state(qup, QUP_STATE_RUN);
564 if (ret) {
565 dev_warn(qup->dev, "cannot set RUN state\n");
566 return ret;
567 }
568
569 if (!wait_for_completion_timeout(&qup->done, timeout))
570 return -ETIMEDOUT;
571
572 offset++;
573 } while (iterations--);
574
575 return 0;
576}
577
578static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
579{
580 struct spi_qup *controller = dev_id;
581 u32 opflags, qup_err, spi_err;
582 int error = 0;
583
584 qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
585 spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
586 opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
587
588 writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
589 writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
590
591 if (qup_err) {
592 if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
593 dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
594 if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
595 dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
596 if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
597 dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
598 if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
599 dev_warn(controller->dev, "INPUT_OVER_RUN\n");
600
601 error = -EIO;
602 }
603
604 if (spi_err) {
605 if (spi_err & SPI_ERROR_CLK_OVER_RUN)
606 dev_warn(controller->dev, "CLK_OVER_RUN\n");
607 if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
608 dev_warn(controller->dev, "CLK_UNDER_RUN\n");
609
610 error = -EIO;
611 }
612
613 if (spi_qup_is_dma_xfer(controller->mode)) {
614 writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
615 } else {
616 if (opflags & QUP_OP_IN_SERVICE_FLAG)
617 spi_qup_read(controller, &opflags);
618
619 if (opflags & QUP_OP_OUT_SERVICE_FLAG)
620 spi_qup_write(controller);
621 }
622
623 if ((opflags & QUP_OP_MAX_INPUT_DONE_FLAG) || error)
624 complete(&controller->done);
625
626 return IRQ_HANDLED;
627}
628
629/* set clock freq ... bits per word, determine mode */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	int ret;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "transfer too big for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "failed to set frequency %d\n",
			xfer->speed_hz);
		return -EIO;
	}

	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
	controller->n_words = xfer->len / controller->w_size;

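	/*
	 * Choose the I/O mode: FIFO when the transfer fits in the input FIFO,
	 * BAM (DMA) when the core mapped the message and can_dma agrees,
	 * BLOCK mode otherwise.
	 */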
	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
		controller->mode = QUP_IO_M_MODE_FIFO;
	else if (spi->master->can_dma &&
		 spi->master->can_dma(spi->master, spi, xfer) &&
		 spi->master->cur_msg_mapped)
		controller->mode = QUP_IO_M_MODE_BAM;
	else
		controller->mode = QUP_IO_M_MODE_BLOCK;

	return 0;
}

/* prepare the QUP for the next SPI transaction of the chosen mode */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, control;
	unsigned long flags;

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	switch (controller->mode) {
	case QUP_IO_M_MODE_FIFO:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_READ_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		break;
	case QUP_IO_M_MODE_BAM:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * For DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one:
			 * a non-balanced transfer with only an rx_buf.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(controller->n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
		break;
	case QUP_IO_M_MODE_BLOCK:
		reinit_completion(&controller->done);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
		break;
	default:
		dev_err(controller->dev, "unknown mode = %d\n",
			controller->mode);
		return -EIO;
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	if (!spi_qup_is_dma_xfer(controller->mode))
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

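	/*
	 * CPHA=0 (SPI modes 0 and 2) samples input on the leading clock edge,
	 * which this controller expresses as INPUT_FIRST.
	 */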
	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability at high SPI clock rates,
	 * but is invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	if (spi_qup_is_dma_xfer(controller->mode)) {
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when register is present */
	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
		 * status change in BAM mode
		 */
		if (spi_qup_is_dma_xfer(controller->mode))
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}

static int spi_qup_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	unsigned long timeout, flags;
	int ret;

	ret = spi_qup_io_prep(spi, xfer);
	if (ret)
		return ret;

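	/*
	 * Nominal wire time in ms is (bytes * 8 bits) / (speed_hz / 1000),
	 * with bytes capped at SPI_MAX_XFER; allow 100x that before timing
	 * out. E.g. 1000 bytes at 1 MHz is ~8 ms on the wire, so the timeout
	 * is ~800 ms.
	 */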
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
				     xfer->len) * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_is_dma_xfer(controller->mode))
		ret = spi_qup_do_dma(spi, xfer, timeout);
	else
		ret = spi_qup_do_pio(spi, xfer, timeout);

	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && spi_qup_is_dma_xfer(controller->mode))
		spi_qup_dma_terminate(master, xfer);

	return ret;
}

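/*
 * A transfer qualifies for DMA only if its buffers are aligned to the cache
 * line, the corresponding BAM channel was acquired, QUP v1 lengths are a
 * multiple of the block size, and the data does not already fit in the
 * input FIFO (FIFO mode handles those).
 */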
static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	size_t dma_align = dma_get_cache_alignment();
	int n_words;

	if (xfer->rx_buf) {
		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
		    IS_ERR_OR_NULL(master->dma_rx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
			return false;
	}

	if (xfer->tx_buf) {
		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
		    IS_ERR_OR_NULL(master->dma_tx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
			return false;
	}

	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
		return false;

	return true;
}

static void spi_qup_release_dma(struct spi_master *master)
{
	if (!IS_ERR_OR_NULL(master->dma_rx))
		dma_release_channel(master->dma_rx);
	if (!IS_ERR_OR_NULL(master->dma_tx))
		dma_release_channel(master->dma_tx);
}

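/*
 * Acquire the rx/tx DMA channels and point them at the QUP FIFO registers.
 * An error from either request (including -EPROBE_DEFER) is returned so the
 * caller can fall back to PIO or retry the probe.
 */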
static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
{
	struct spi_qup *spi = spi_master_get_devdata(master);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx))
		return PTR_ERR(master->dma_rx);

	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(master->dma_tx);
err_tx:
	dma_release_channel(master->dma_rx);
	return ret;
}

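/*
 * Manual chip-select control via FORCE_CS; the register is only rewritten
 * when the requested state differs from what is already programmed.
 */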
static void spi_qup_set_cs(struct spi_device *spi, bool val)
{
	struct spi_qup *controller;
	u32 spi_ioc;
	u32 spi_ioc_orig;

	controller = spi_master_get_devdata(spi->master);
	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (!val)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
}

static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* the spi-max-frequency property is optional */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs from DT unless it is absent or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;
	master->dma_alignment = dma_get_cache_alignment();
	master->max_dma_len = SPI_MAX_XFER;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	ret = spi_qup_init_dma(master, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		master->can_dma = spi_qup_can_dma;

	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);

	if (!controller->qup_v1)
		master->set_cs = spi_qup_set_cs;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	iomode = readl_relaxed(base + QUP_IO_M_MODES);

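	/*
	 * Decode the hardware geometry: block size is 4 bytes when the field
	 * is 0, else 16 * N bytes; FIFO size is block size * 2^(field + 1).
	 * E.g. a block field of 1 and a FIFO field of 2 gives a 16-byte
	 * block and a 128-byte FIFO.
	 */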
	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_dma;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* on the earlier QUP v1, leave INPUT_OVER_RUN reporting disabled */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_dma;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_dma:
	spi_qup_release_dma(master);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}

#ifdef CONFIG_PM
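/*
 * Runtime PM: suspend enables the QUP's internal clock auto-gating and then
 * stops the bus clocks; resume restarts the clocks and turns auto-gating
 * back off.
 */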
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret) {
		clk_disable_unprepare(controller->iclk);
		return ret;
	}

	/* disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(device)) {
		clk_disable_unprepare(controller->cclk);
		clk_disable_unprepare(controller->iclk);
	}
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret) {
		clk_disable_unprepare(controller->iclk);
		return ret;
	}

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static int spi_qup_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	spi_qup_release_dma(master);

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

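/*
 * A (hypothetical) example devicetree node matching what this driver
 * expects: one register region, one IRQ, "core"/"iface" clocks, optional
 * "rx"/"tx" DMA channels, and the optional num-cs and spi-max-frequency
 * properties:
 *
 *	spi@78b6000 {
 *		compatible = "qcom,spi-qup-v2.2.1";
 *		reg = <0x078b6000 0x600>;
 *		interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
 *			 <&gcc GCC_BLSP1_AHB_CLK>;
 *		clock-names = "core", "iface";
 *		dmas = <&blsp_dma 13>, <&blsp_dma 12>;
 *		dma-names = "tx", "rx";
 *		num-cs = <1>;
 *		spi-max-frequency = <25000000>;
 *	};
 *
 * A nonzero .data below marks the QUP v1 variants (controller->qup_v1).
 */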
static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");