// SPDX-License-Identifier: GPL-2.0+
//
// Freescale i.MX7ULP LPSPI driver
//
// Copyright 2016 Freescale Semiconductor, Inc.
// Copyright 2018 NXP Semiconductors

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma/imx-dma.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>

#define DRIVER_NAME "fsl_lpspi"

#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */

/* The maximum number of bytes that eDMA can transfer at once. */
#define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1)

/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID 0x0
#define IMX7ULP_PARAM 0x4
#define IMX7ULP_CR 0x10
#define IMX7ULP_SR 0x14
#define IMX7ULP_IER 0x18
#define IMX7ULP_DER 0x1c
#define IMX7ULP_CFGR0 0x20
#define IMX7ULP_CFGR1 0x24
#define IMX7ULP_DMR0 0x30
#define IMX7ULP_DMR1 0x34
#define IMX7ULP_CCR 0x40
#define IMX7ULP_FCR 0x58
#define IMX7ULP_FSR 0x5c
#define IMX7ULP_TCR 0x60
#define IMX7ULP_TDR 0x64
#define IMX7ULP_RSR 0x70
#define IMX7ULP_RDR 0x74

/* General control register field definitions */
#define CR_RRF BIT(9)
#define CR_RTF BIT(8)
#define CR_RST BIT(1)
#define CR_MEN BIT(0)
#define SR_MBF BIT(24)
#define SR_TCF BIT(10)
#define SR_FCF BIT(9)
#define SR_RDF BIT(1)
#define SR_TDF BIT(0)
#define IER_TCIE BIT(10)
#define IER_FCIE BIT(9)
#define IER_RDIE BIT(1)
#define IER_TDIE BIT(0)
#define DER_RDDE BIT(1)
#define DER_TDDE BIT(0)
#define CFGR1_PCSCFG BIT(27)
#define CFGR1_PINCFG (BIT(24)|BIT(25))
#define CFGR1_PCSPOL BIT(8)
#define CFGR1_NOSTALL BIT(3)
#define CFGR1_HOST BIT(0)
#define FSR_TXCOUNT (0xFF)
#define RSR_RXEMPTY BIT(1)
#define TCR_CPOL BIT(31)
#define TCR_CPHA BIT(30)
#define TCR_CONT BIT(21)
#define TCR_CONTC BIT(20)
#define TCR_RXMSK BIT(19)
#define TCR_TXMSK BIT(18)

struct fsl_lpspi_devtype_data {
        u8 prescale_max;
};

struct lpspi_config {
        u8 bpw;
        u8 chip_select;
        u8 prescale;
        u16 mode;
        u32 speed_hz;
        u32 effective_speed_hz;
};

struct fsl_lpspi_data {
        struct device *dev;
        void __iomem *base;
        unsigned long base_phys;
        struct clk *clk_ipg;
        struct clk *clk_per;
        bool is_target;
        bool is_only_cs1;
        bool is_first_byte;

        void *rx_buf;
        const void *tx_buf;
        void (*tx)(struct fsl_lpspi_data *);
        void (*rx)(struct fsl_lpspi_data *);

        u32 remain;
        u8 watermark;
        u8 txfifosize;
        u8 rxfifosize;

        struct lpspi_config config;
        struct completion xfer_done;

        bool target_aborted;

        /* DMA */
        bool usedma;
        struct completion dma_rx_completion;
        struct completion dma_tx_completion;

        const struct fsl_lpspi_devtype_data *devtype_data;
};

/*
 * ERR051608 fixed or not:
 * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
 */
static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
        .prescale_max = 1,
};

static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
        .prescale_max = 7,
};

static const struct of_device_id fsl_lpspi_dt_ids[] = {
        { .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
        { .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);

#define LPSPI_BUF_RX(type) \
static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi) \
{ \
        unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR); \
 \
        if (fsl_lpspi->rx_buf) { \
                *(type *)fsl_lpspi->rx_buf = val; \
                fsl_lpspi->rx_buf += sizeof(type); \
        } \
}

#define LPSPI_BUF_TX(type) \
static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi) \
{ \
        type val = 0; \
 \
        if (fsl_lpspi->tx_buf) { \
                val = *(type *)fsl_lpspi->tx_buf; \
                fsl_lpspi->tx_buf += sizeof(type); \
        } \
 \
        fsl_lpspi->remain -= sizeof(type); \
        writel(val, fsl_lpspi->base + IMX7ULP_TDR); \
}

LPSPI_BUF_RX(u8)
LPSPI_BUF_TX(u8)
LPSPI_BUF_RX(u16)
LPSPI_BUF_TX(u16)
LPSPI_BUF_RX(u32)
LPSPI_BUF_TX(u32)

static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
                              unsigned int enable)
{
        writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}

static int fsl_lpspi_bytes_per_word(const int bpw)
{
        return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
}

static bool fsl_lpspi_can_dma(struct spi_controller *controller,
                              struct spi_device *spi,
                              struct spi_transfer *transfer)
{
        unsigned int bytes_per_word;

        if (!controller->dma_rx)
                return false;

        bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);

        switch (bytes_per_word) {
        case 1:
        case 2:
        case 4:
                break;
        default:
                return false;
        }

        return true;
}

static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
        struct fsl_lpspi_data *fsl_lpspi =
                        spi_controller_get_devdata(controller);
        int ret;

        ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
        if (ret < 0) {
                dev_err(fsl_lpspi->dev, "failed to enable clock\n");
                return ret;
        }

        return 0;
}

static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
{
        struct fsl_lpspi_data *fsl_lpspi =
                        spi_controller_get_devdata(controller);

        pm_runtime_mark_last_busy(fsl_lpspi->dev);
        pm_runtime_put_autosuspend(fsl_lpspi->dev);

        return 0;
}

static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
        u8 txfifo_cnt;
        u32 temp;

        txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;

        while (txfifo_cnt < fsl_lpspi->txfifosize) {
                if (!fsl_lpspi->remain)
                        break;
                fsl_lpspi->tx(fsl_lpspi);
                txfifo_cnt++;
        }

        if (txfifo_cnt < fsl_lpspi->txfifosize) {
                if (!fsl_lpspi->is_target) {
                        temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
                        temp &= ~TCR_CONTC;
                        writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
                }

                fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
        } else
                fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
}

static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
        while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
                fsl_lpspi->rx(fsl_lpspi);
}

static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
        u32 temp = 0;

        temp |= fsl_lpspi->config.bpw - 1;
        temp |= (fsl_lpspi->config.mode & 0x3) << 30;
        temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
        if (!fsl_lpspi->is_target) {
                temp |= fsl_lpspi->config.prescale << 27;
                /*
                 * Setting TCR_CONT keeps SS asserted after the current
                 * transfer. For the first transfer, clear TCR_CONTC to
                 * assert SS; for subsequent transfers, set TCR_CONTC to
                 * keep SS asserted.
                 */
                if (!fsl_lpspi->usedma) {
                        temp |= TCR_CONT;
                        if (fsl_lpspi->is_first_byte)
                                temp &= ~TCR_CONTC;
                        else
                                temp |= TCR_CONTC;
                }
        }
        writel(temp, fsl_lpspi->base + IMX7ULP_TCR);

        dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
}
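
/*
 * Note on the TCR usage in fsl_lpspi_set_cmd() above (derived from the
 * shifts and the TCR_* definitions in this file, not a full field list):
 * the low bits carry the frame size (bpw - 1), bits 31:30 carry CPOL/CPHA
 * taken from the SPI mode, bits 25:24 select the chip select, and in host
 * mode the prescaler value is placed at bits 29:27.
 */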

static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
        u32 temp;

        if (!fsl_lpspi->usedma)
                temp = fsl_lpspi->watermark >> 1 |
                       (fsl_lpspi->watermark >> 1) << 16;
        else
                temp = fsl_lpspi->watermark >> 1;

        writel(temp, fsl_lpspi->base + IMX7ULP_FCR);

        dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
}

static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
        struct lpspi_config config = fsl_lpspi->config;
        unsigned int perclk_rate, div;
        u8 prescale_max;
        u8 prescale;
        int scldiv;

        perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
        prescale_max = fsl_lpspi->devtype_data->prescale_max;

        if (!config.speed_hz) {
                dev_err(fsl_lpspi->dev,
                        "error: the transmission speed provided is 0!\n");
                return -EINVAL;
        }

        if (config.speed_hz > perclk_rate / 2) {
                dev_err(fsl_lpspi->dev,
                        "per-clk should be at least two times the transfer speed");
                return -EINVAL;
        }

        div = DIV_ROUND_UP(perclk_rate, config.speed_hz);

        for (prescale = 0; prescale <= prescale_max; prescale++) {
                scldiv = div / (1 << prescale) - 2;
                if (scldiv >= 0 && scldiv < 256) {
                        fsl_lpspi->config.prescale = prescale;
                        break;
                }
        }

        if (scldiv < 0 || scldiv >= 256)
                return -EINVAL;

        writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
               fsl_lpspi->base + IMX7ULP_CCR);

        fsl_lpspi->config.effective_speed_hz = perclk_rate / (scldiv + 2) *
                                               (1 << prescale);

        dev_dbg(fsl_lpspi->dev, "perclk=%u, speed=%u, prescale=%u, scldiv=%d\n",
                perclk_rate, config.speed_hz, prescale, scldiv);

        return 0;
}
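
/*
 * Worked example for the divider search above (illustrative numbers, not
 * taken from the original source): with perclk_rate = 48 MHz and
 * speed_hz = 10 MHz, div = DIV_ROUND_UP(48000000, 10000000) = 5. The loop
 * succeeds at prescale = 0 with scldiv = 5 - 2 = 3, which lies in [0, 255],
 * so the low byte of CCR is programmed to 3 and the effective SCK rate is
 * 48 MHz / (3 + 2) = 9.6 MHz, the closest rate not above the request.
 */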

static int fsl_lpspi_dma_configure(struct spi_controller *controller)
{
        int ret;
        enum dma_slave_buswidth buswidth;
        struct dma_slave_config rx = {}, tx = {};
        struct fsl_lpspi_data *fsl_lpspi =
                        spi_controller_get_devdata(controller);

        switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
        case 4:
                buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
                break;
        case 2:
                buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        case 1:
                buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
                break;
        default:
                return -EINVAL;
        }

        tx.direction = DMA_MEM_TO_DEV;
        tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
        tx.dst_addr_width = buswidth;
        tx.dst_maxburst = 1;
        ret = dmaengine_slave_config(controller->dma_tx, &tx);
        if (ret) {
                dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
                        ret);
                return ret;
        }

        rx.direction = DMA_DEV_TO_MEM;
        rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
        rx.src_addr_width = buswidth;
        rx.src_maxburst = 1;
        ret = dmaengine_slave_config(controller->dma_rx, &rx);
        if (ret) {
                dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
                        ret);
                return ret;
        }

        return 0;
}

static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
        u32 temp;
        int ret;

        if (!fsl_lpspi->is_target) {
                ret = fsl_lpspi_set_bitrate(fsl_lpspi);
                if (ret)
                        return ret;
        }

        fsl_lpspi_set_watermark(fsl_lpspi);

        if (!fsl_lpspi->is_target)
                temp = CFGR1_HOST;
        else
                temp = CFGR1_PINCFG;
        if (fsl_lpspi->config.mode & SPI_CS_HIGH)
                temp |= CFGR1_PCSPOL;
        writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);

        temp = readl(fsl_lpspi->base + IMX7ULP_CR);
        temp |= CR_RRF | CR_RTF | CR_MEN;
        writel(temp, fsl_lpspi->base + IMX7ULP_CR);

        temp = 0;
        if (fsl_lpspi->usedma)
                temp = DER_TDDE | DER_RDDE;
        writel(temp, fsl_lpspi->base + IMX7ULP_DER);

        return 0;
}

static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
                                    struct spi_device *spi,
                                    struct spi_transfer *t)
{
        struct fsl_lpspi_data *fsl_lpspi =
                        spi_controller_get_devdata(spi->controller);

        if (t == NULL)
                return -EINVAL;

        fsl_lpspi->config.mode = spi->mode;
        fsl_lpspi->config.bpw = t->bits_per_word;
        fsl_lpspi->config.speed_hz = t->speed_hz;
        if (fsl_lpspi->is_only_cs1)
                fsl_lpspi->config.chip_select = 1;
        else
                fsl_lpspi->config.chip_select = spi_get_chipselect(spi, 0);

        if (!fsl_lpspi->config.speed_hz)
                fsl_lpspi->config.speed_hz = spi->max_speed_hz;
        if (!fsl_lpspi->config.bpw)
                fsl_lpspi->config.bpw = spi->bits_per_word;

        /* Initialize the functions for transfer */
        if (fsl_lpspi->config.bpw <= 8) {
                fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
                fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
        } else if (fsl_lpspi->config.bpw <= 16) {
                fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
                fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
        } else {
                fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
                fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
        }

        if (t->len <= fsl_lpspi->txfifosize)
                fsl_lpspi->watermark = t->len;
        else
                fsl_lpspi->watermark = fsl_lpspi->txfifosize;

        if (fsl_lpspi_can_dma(controller, spi, t))
                fsl_lpspi->usedma = true;
        else
                fsl_lpspi->usedma = false;

        return fsl_lpspi_config(fsl_lpspi);
}

static int fsl_lpspi_target_abort(struct spi_controller *controller)
{
        struct fsl_lpspi_data *fsl_lpspi =
                        spi_controller_get_devdata(controller);

        fsl_lpspi->target_aborted = true;
        if (!fsl_lpspi->usedma)
                complete(&fsl_lpspi->xfer_done);
        else {
                complete(&fsl_lpspi->dma_tx_completion);
                complete(&fsl_lpspi->dma_rx_completion);
        }

        return 0;
}

static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
{
        struct fsl_lpspi_data *fsl_lpspi =
                        spi_controller_get_devdata(controller);

        if (fsl_lpspi->is_target) {
                if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
                    fsl_lpspi->target_aborted) {
                        dev_dbg(fsl_lpspi->dev, "interrupted\n");
                        return -EINTR;
                }
        } else {
                if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
                        dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
        u32 temp;

        if (!fsl_lpspi->usedma) {
                /* Disable all interrupts */
                fsl_lpspi_intctrl(fsl_lpspi, 0);
        }

        /* W1C for all flags in SR */
        temp = 0x3F << 8;
        writel(temp, fsl_lpspi->base + IMX7ULP_SR);

        /* Clear FIFO and disable module */
        temp = CR_RRF | CR_RTF;
        writel(temp, fsl_lpspi->base + IMX7ULP_CR);

        return 0;
}

static void fsl_lpspi_dma_rx_callback(void *cookie)
{
        struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

        complete(&fsl_lpspi->dma_rx_completion);
}

static void fsl_lpspi_dma_tx_callback(void *cookie)
{
        struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

        complete(&fsl_lpspi->dma_tx_completion);
}

static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
                                       int size)
{
        unsigned long timeout = 0;

        /* Time for the actual data transfer plus the HW-related CS change delay */
        timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;

        /* Add an extra second for scheduler-related activities */
        timeout += 1;

        /* Double the calculated timeout */
        return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}
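
/*
 * Illustrative example of the timeout calculation above (values chosen
 * here, not from the original source): for a 4096-byte transfer at 1 MHz,
 * (8 + 4) * 4096 / 1000000 truncates to 0 seconds, one second is added for
 * scheduling latency, and doubling gives a 2-second timeout in jiffies.
 */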

static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
                                  struct fsl_lpspi_data *fsl_lpspi,
                                  struct spi_transfer *transfer)
{
        struct dma_async_tx_descriptor *desc_tx, *desc_rx;
        unsigned long transfer_timeout;
        unsigned long time_left;
        struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
        int ret;

        ret = fsl_lpspi_dma_configure(controller);
        if (ret)
                return ret;

        desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
                                rx->sgl, rx->nents, DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_rx)
                return -EINVAL;

        desc_rx->callback = fsl_lpspi_dma_rx_callback;
        desc_rx->callback_param = (void *)fsl_lpspi;
        dmaengine_submit(desc_rx);
        reinit_completion(&fsl_lpspi->dma_rx_completion);
        dma_async_issue_pending(controller->dma_rx);

        desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
                                tx->sgl, tx->nents, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_tx) {
                dmaengine_terminate_all(controller->dma_tx);
                return -EINVAL;
        }

        desc_tx->callback = fsl_lpspi_dma_tx_callback;
        desc_tx->callback_param = (void *)fsl_lpspi;
        dmaengine_submit(desc_tx);
        reinit_completion(&fsl_lpspi->dma_tx_completion);
        dma_async_issue_pending(controller->dma_tx);

        fsl_lpspi->target_aborted = false;

        if (!fsl_lpspi->is_target) {
                transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
                                                               transfer->len);

                /* Wait for eDMA to finish the data transfer. */
                time_left = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
                                                        transfer_timeout);
                if (!time_left) {
                        dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
                        dmaengine_terminate_all(controller->dma_tx);
                        dmaengine_terminate_all(controller->dma_rx);
                        fsl_lpspi_reset(fsl_lpspi);
                        return -ETIMEDOUT;
                }

                time_left = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
                                                        transfer_timeout);
                if (!time_left) {
                        dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
                        dmaengine_terminate_all(controller->dma_tx);
                        dmaengine_terminate_all(controller->dma_rx);
                        fsl_lpspi_reset(fsl_lpspi);
                        return -ETIMEDOUT;
                }
        } else {
                if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
                    fsl_lpspi->target_aborted) {
                        dev_dbg(fsl_lpspi->dev,
                                "I/O Error in DMA TX interrupted\n");
                        dmaengine_terminate_all(controller->dma_tx);
                        dmaengine_terminate_all(controller->dma_rx);
                        fsl_lpspi_reset(fsl_lpspi);
                        return -EINTR;
                }

                if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
                    fsl_lpspi->target_aborted) {
                        dev_dbg(fsl_lpspi->dev,
                                "I/O Error in DMA RX interrupted\n");
                        dmaengine_terminate_all(controller->dma_tx);
                        dmaengine_terminate_all(controller->dma_rx);
                        fsl_lpspi_reset(fsl_lpspi);
                        return -EINTR;
                }
        }

        fsl_lpspi_reset(fsl_lpspi);

        return 0;
}

static void fsl_lpspi_dma_exit(struct spi_controller *controller)
{
        if (controller->dma_rx) {
                dma_release_channel(controller->dma_rx);
                controller->dma_rx = NULL;
        }

        if (controller->dma_tx) {
                dma_release_channel(controller->dma_tx);
                controller->dma_tx = NULL;
        }
}

static int fsl_lpspi_dma_init(struct device *dev,
                              struct fsl_lpspi_data *fsl_lpspi,
                              struct spi_controller *controller)
{
        int ret;

        /* Prepare for TX DMA: */
        controller->dma_tx = dma_request_chan(dev, "tx");
        if (IS_ERR(controller->dma_tx)) {
                ret = PTR_ERR(controller->dma_tx);
                dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
                controller->dma_tx = NULL;
                goto err;
        }

        /* Prepare for RX DMA: */
        controller->dma_rx = dma_request_chan(dev, "rx");
        if (IS_ERR(controller->dma_rx)) {
                ret = PTR_ERR(controller->dma_rx);
                dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
                controller->dma_rx = NULL;
                goto err;
        }

        init_completion(&fsl_lpspi->dma_rx_completion);
        init_completion(&fsl_lpspi->dma_tx_completion);
        controller->can_dma = fsl_lpspi_can_dma;
        controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;

        return 0;
err:
        fsl_lpspi_dma_exit(controller);
        return ret;
}

static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
                                  struct spi_transfer *t)
{
        struct fsl_lpspi_data *fsl_lpspi =
                        spi_controller_get_devdata(controller);
        int ret;

        fsl_lpspi->tx_buf = t->tx_buf;
        fsl_lpspi->rx_buf = t->rx_buf;
        fsl_lpspi->remain = t->len;

        reinit_completion(&fsl_lpspi->xfer_done);
        fsl_lpspi->target_aborted = false;

        fsl_lpspi_write_tx_fifo(fsl_lpspi);

        ret = fsl_lpspi_wait_for_completion(controller);
        if (ret)
                return ret;

        fsl_lpspi_reset(fsl_lpspi);

        return 0;
}

static int fsl_lpspi_transfer_one(struct spi_controller *controller,
                                  struct spi_device *spi,
                                  struct spi_transfer *t)
{
        struct fsl_lpspi_data *fsl_lpspi =
                        spi_controller_get_devdata(controller);
        int ret;

        fsl_lpspi->is_first_byte = true;
        ret = fsl_lpspi_setup_transfer(controller, spi, t);
        if (ret < 0)
                return ret;

        t->effective_speed_hz = fsl_lpspi->config.effective_speed_hz;

        fsl_lpspi_set_cmd(fsl_lpspi);
        fsl_lpspi->is_first_byte = false;

        if (fsl_lpspi->usedma)
                ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
        else
                ret = fsl_lpspi_pio_transfer(controller, t);
        if (ret < 0)
                return ret;

        return 0;
}

static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
        u32 temp_SR, temp_IER;
        struct fsl_lpspi_data *fsl_lpspi = dev_id;

        temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
        fsl_lpspi_intctrl(fsl_lpspi, 0);
        temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);

        fsl_lpspi_read_rx_fifo(fsl_lpspi);

        if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
                fsl_lpspi_write_tx_fifo(fsl_lpspi);
                return IRQ_HANDLED;
        }

        if (temp_SR & SR_MBF ||
            readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
                writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
                fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
                return IRQ_HANDLED;
        }

        if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
                writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
                complete(&fsl_lpspi->xfer_done);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

#ifdef CONFIG_PM
static int fsl_lpspi_runtime_resume(struct device *dev)
{
        struct spi_controller *controller = dev_get_drvdata(dev);
        struct fsl_lpspi_data *fsl_lpspi;
        int ret;

        fsl_lpspi = spi_controller_get_devdata(controller);

        ret = clk_prepare_enable(fsl_lpspi->clk_per);
        if (ret)
                return ret;

        ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
        if (ret) {
                clk_disable_unprepare(fsl_lpspi->clk_per);
                return ret;
        }

        return 0;
}

static int fsl_lpspi_runtime_suspend(struct device *dev)
{
        struct spi_controller *controller = dev_get_drvdata(dev);
        struct fsl_lpspi_data *fsl_lpspi;

        fsl_lpspi = spi_controller_get_devdata(controller);

        clk_disable_unprepare(fsl_lpspi->clk_per);
        clk_disable_unprepare(fsl_lpspi->clk_ipg);

        return 0;
}
#endif

static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
{
        struct device *dev = fsl_lpspi->dev;

        pm_runtime_enable(dev);
        pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(dev);

        return 0;
}

static int fsl_lpspi_probe(struct platform_device *pdev)
{
        const struct fsl_lpspi_devtype_data *devtype_data;
        struct fsl_lpspi_data *fsl_lpspi;
        struct spi_controller *controller;
        struct resource *res;
        int ret, irq;
        u32 num_cs;
        u32 temp;
        bool is_target;

        devtype_data = of_device_get_match_data(&pdev->dev);
        if (!devtype_data)
                return -ENODEV;

        is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
        if (is_target)
                controller = devm_spi_alloc_target(&pdev->dev,
                                                   sizeof(struct fsl_lpspi_data));
        else
                controller = devm_spi_alloc_host(&pdev->dev,
                                                 sizeof(struct fsl_lpspi_data));

        if (!controller)
                return -ENOMEM;

        platform_set_drvdata(pdev, controller);

        fsl_lpspi = spi_controller_get_devdata(controller);
        fsl_lpspi->dev = &pdev->dev;
        fsl_lpspi->is_target = is_target;
        fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
                                                       "fsl,spi-only-use-cs1-sel");
        fsl_lpspi->devtype_data = devtype_data;

        init_completion(&fsl_lpspi->xfer_done);

        fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(fsl_lpspi->base)) {
                ret = PTR_ERR(fsl_lpspi->base);
                return ret;
        }
        fsl_lpspi->base_phys = res->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                return ret;
        }

        ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, IRQF_NO_AUTOEN,
                               dev_name(&pdev->dev), fsl_lpspi);
        if (ret) {
                dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
                return ret;
        }

        fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
        if (IS_ERR(fsl_lpspi->clk_per)) {
                ret = PTR_ERR(fsl_lpspi->clk_per);
                return ret;
        }

        fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(fsl_lpspi->clk_ipg)) {
                ret = PTR_ERR(fsl_lpspi->clk_ipg);
                return ret;
        }

        /* enable the clock */
        ret = fsl_lpspi_init_rpm(fsl_lpspi);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(fsl_lpspi->dev);
        if (ret < 0) {
                dev_err(fsl_lpspi->dev, "failed to enable clock\n");
                goto out_pm_get;
        }

        temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
        fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
        fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
        if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
                                 &num_cs)) {
                if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx93-spi"))
                        num_cs = ((temp >> 16) & 0xf);
                else
                        num_cs = 1;
        }

        controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
        controller->transfer_one = fsl_lpspi_transfer_one;
        controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
        controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
        controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
        controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
        controller->dev.of_node = pdev->dev.of_node;
        controller->bus_num = pdev->id;
        controller->num_chipselect = num_cs;
        controller->target_abort = fsl_lpspi_target_abort;
        if (!fsl_lpspi->is_target)
                controller->use_gpio_descriptors = true;

        ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
        if (ret == -EPROBE_DEFER)
                goto out_pm_get;
        if (ret < 0) {
                dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
                enable_irq(irq);
        }

        ret = devm_spi_register_controller(&pdev->dev, controller);
        if (ret < 0) {
                dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
                goto free_dma;
        }

        pm_runtime_mark_last_busy(fsl_lpspi->dev);
        pm_runtime_put_autosuspend(fsl_lpspi->dev);

        return 0;

free_dma:
        fsl_lpspi_dma_exit(controller);
out_pm_get:
        pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
        pm_runtime_put_sync(fsl_lpspi->dev);
        pm_runtime_disable(fsl_lpspi->dev);

        return ret;
}

static void fsl_lpspi_remove(struct platform_device *pdev)
{
        struct spi_controller *controller = platform_get_drvdata(pdev);
        struct fsl_lpspi_data *fsl_lpspi =
                        spi_controller_get_devdata(controller);

        fsl_lpspi_dma_exit(controller);

        pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
        pm_runtime_disable(fsl_lpspi->dev);
}

static int fsl_lpspi_suspend(struct device *dev)
{
        pinctrl_pm_select_sleep_state(dev);
        return pm_runtime_force_suspend(dev);
}

static int fsl_lpspi_resume(struct device *dev)
{
        int ret;

        ret = pm_runtime_force_resume(dev);
        if (ret) {
                dev_err(dev, "Error in resume: %d\n", ret);
                return ret;
        }

        pinctrl_pm_select_default_state(dev);

        return 0;
}

static const struct dev_pm_ops fsl_lpspi_pm_ops = {
        SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
                           fsl_lpspi_runtime_resume, NULL)
        SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};

static struct platform_driver fsl_lpspi_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = fsl_lpspi_dt_ids,
                .pm = pm_ptr(&fsl_lpspi_pm_ops),
        },
        .probe = fsl_lpspi_probe,
        .remove = fsl_lpspi_remove,
};
module_platform_driver(fsl_lpspi_driver);

MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_LICENSE("GPL");