Loading...
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2018 MediaTek Inc.
3
4#include <linux/clk.h>
5#include <linux/device.h>
6#include <linux/dma-mapping.h>
7#include <linux/err.h>
8#include <linux/interrupt.h>
9#include <linux/module.h>
10#include <linux/platform_device.h>
11#include <linux/pm_runtime.h>
12#include <linux/spi/spi.h>
13#include <linux/of.h>
14
15
16#define SPIS_IRQ_EN_REG 0x0
17#define SPIS_IRQ_CLR_REG 0x4
18#define SPIS_IRQ_ST_REG 0x8
19#define SPIS_IRQ_MASK_REG 0xc
20#define SPIS_CFG_REG 0x10
21#define SPIS_RX_DATA_REG 0x14
22#define SPIS_TX_DATA_REG 0x18
23#define SPIS_RX_DST_REG 0x1c
24#define SPIS_TX_SRC_REG 0x20
25#define SPIS_DMA_CFG_REG 0x30
26#define SPIS_SOFT_RST_REG 0x40
27
28/* SPIS_IRQ_EN_REG */
29#define DMA_DONE_EN BIT(7)
30#define DATA_DONE_EN BIT(2)
31#define RSTA_DONE_EN BIT(1)
32#define CMD_INVALID_EN BIT(0)
33
34/* SPIS_IRQ_ST_REG */
35#define DMA_DONE_ST BIT(7)
36#define DATA_DONE_ST BIT(2)
37#define RSTA_DONE_ST BIT(1)
38#define CMD_INVALID_ST BIT(0)
39
40/* SPIS_IRQ_MASK_REG */
41#define DMA_DONE_MASK BIT(7)
42#define DATA_DONE_MASK BIT(2)
43#define RSTA_DONE_MASK BIT(1)
44#define CMD_INVALID_MASK BIT(0)
45
46/* SPIS_CFG_REG */
47#define SPIS_TX_ENDIAN BIT(7)
48#define SPIS_RX_ENDIAN BIT(6)
49#define SPIS_TXMSBF BIT(5)
50#define SPIS_RXMSBF BIT(4)
51#define SPIS_CPHA BIT(3)
52#define SPIS_CPOL BIT(2)
53#define SPIS_TX_EN BIT(1)
54#define SPIS_RX_EN BIT(0)
55
56/* SPIS_DMA_CFG_REG */
57#define TX_DMA_TRIG_EN BIT(31)
58#define TX_DMA_EN BIT(30)
59#define RX_DMA_EN BIT(29)
60#define TX_DMA_LEN 0xfffff
61
62/* SPIS_SOFT_RST_REG */
63#define SPIS_DMA_ADDR_EN BIT(1)
64#define SPIS_SOFT_RST BIT(0)
65
66struct mtk_spi_slave {
67 struct device *dev;
68 void __iomem *base;
69 struct clk *spi_clk;
70 struct completion xfer_done;
71 struct spi_transfer *cur_transfer;
72 bool slave_aborted;
73 const struct mtk_spi_compatible *dev_comp;
74};
75
76struct mtk_spi_compatible {
77 const u32 max_fifo_size;
78 bool must_rx;
79};
80
81static const struct mtk_spi_compatible mt2712_compat = {
82 .max_fifo_size = 512,
83};
84static const struct mtk_spi_compatible mt8195_compat = {
85 .max_fifo_size = 128,
86 .must_rx = true,
87};
88
89static const struct of_device_id mtk_spi_slave_of_match[] = {
90 { .compatible = "mediatek,mt2712-spi-slave",
91 .data = (void *)&mt2712_compat,},
92 { .compatible = "mediatek,mt8195-spi-slave",
93 .data = (void *)&mt8195_compat,},
94 {}
95};
96MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);
97
98static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
99{
100 u32 reg_val;
101
102 reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
103 reg_val &= ~RX_DMA_EN;
104 reg_val &= ~TX_DMA_EN;
105 writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
106}
107
108static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
109{
110 u32 reg_val;
111
112 reg_val = readl(mdata->base + SPIS_CFG_REG);
113 reg_val &= ~SPIS_TX_EN;
114 reg_val &= ~SPIS_RX_EN;
115 writel(reg_val, mdata->base + SPIS_CFG_REG);
116}
117
118static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
119{
120 if (wait_for_completion_interruptible(&mdata->xfer_done) ||
121 mdata->slave_aborted) {
122 dev_err(mdata->dev, "interrupted\n");
123 return -EINTR;
124 }
125
126 return 0;
127}
128
129static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
130 struct spi_message *msg)
131{
132 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
133 struct spi_device *spi = msg->spi;
134 bool cpha, cpol;
135 u32 reg_val;
136
137 cpha = spi->mode & SPI_CPHA ? 1 : 0;
138 cpol = spi->mode & SPI_CPOL ? 1 : 0;
139
140 reg_val = readl(mdata->base + SPIS_CFG_REG);
141 if (cpha)
142 reg_val |= SPIS_CPHA;
143 else
144 reg_val &= ~SPIS_CPHA;
145 if (cpol)
146 reg_val |= SPIS_CPOL;
147 else
148 reg_val &= ~SPIS_CPOL;
149
150 if (spi->mode & SPI_LSB_FIRST)
151 reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
152 else
153 reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;
154
155 reg_val &= ~SPIS_TX_ENDIAN;
156 reg_val &= ~SPIS_RX_ENDIAN;
157 writel(reg_val, mdata->base + SPIS_CFG_REG);
158
159 return 0;
160}
161
162static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
163 struct spi_device *spi,
164 struct spi_transfer *xfer)
165{
166 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
167 int reg_val, cnt, remainder, ret;
168
169 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
170
171 reg_val = readl(mdata->base + SPIS_CFG_REG);
172 if (xfer->rx_buf)
173 reg_val |= SPIS_RX_EN;
174 if (xfer->tx_buf)
175 reg_val |= SPIS_TX_EN;
176 writel(reg_val, mdata->base + SPIS_CFG_REG);
177
178 cnt = xfer->len / 4;
179 if (xfer->tx_buf)
180 iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
181 xfer->tx_buf, cnt);
182
183 remainder = xfer->len % 4;
184 if (xfer->tx_buf && remainder > 0) {
185 reg_val = 0;
186 memcpy(®_val, xfer->tx_buf + cnt * 4, remainder);
187 writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
188 }
189
190 ret = mtk_spi_slave_wait_for_completion(mdata);
191 if (ret) {
192 mtk_spi_slave_disable_xfer(mdata);
193 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
194 }
195
196 return ret;
197}
198
199static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
200 struct spi_device *spi,
201 struct spi_transfer *xfer)
202{
203 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
204 struct device *dev = mdata->dev;
205 int reg_val, ret;
206
207 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
208
209 if (xfer->tx_buf) {
210 /* tx_buf is a const void* where we need a void * for
211 * the dma mapping
212 */
213 void *nonconst_tx = (void *)xfer->tx_buf;
214
215 xfer->tx_dma = dma_map_single(dev, nonconst_tx,
216 xfer->len, DMA_TO_DEVICE);
217 if (dma_mapping_error(dev, xfer->tx_dma)) {
218 ret = -ENOMEM;
219 goto disable_transfer;
220 }
221 }
222
223 if (xfer->rx_buf) {
224 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
225 xfer->len, DMA_FROM_DEVICE);
226 if (dma_mapping_error(dev, xfer->rx_dma)) {
227 ret = -ENOMEM;
228 goto unmap_txdma;
229 }
230 }
231
232 writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
233 writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);
234
235 writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);
236
237 /* enable config reg tx rx_enable */
238 reg_val = readl(mdata->base + SPIS_CFG_REG);
239 if (xfer->tx_buf)
240 reg_val |= SPIS_TX_EN;
241 if (xfer->rx_buf)
242 reg_val |= SPIS_RX_EN;
243 writel(reg_val, mdata->base + SPIS_CFG_REG);
244
245 /* config dma */
246 reg_val = 0;
247 reg_val |= (xfer->len - 1) & TX_DMA_LEN;
248 writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
249
250 reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
251 if (xfer->tx_buf)
252 reg_val |= TX_DMA_EN;
253 if (xfer->rx_buf)
254 reg_val |= RX_DMA_EN;
255 reg_val |= TX_DMA_TRIG_EN;
256 writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
257
258 ret = mtk_spi_slave_wait_for_completion(mdata);
259 if (ret)
260 goto unmap_rxdma;
261
262 return 0;
263
264unmap_rxdma:
265 if (xfer->rx_buf)
266 dma_unmap_single(dev, xfer->rx_dma,
267 xfer->len, DMA_FROM_DEVICE);
268
269unmap_txdma:
270 if (xfer->tx_buf)
271 dma_unmap_single(dev, xfer->tx_dma,
272 xfer->len, DMA_TO_DEVICE);
273
274disable_transfer:
275 mtk_spi_slave_disable_dma(mdata);
276 mtk_spi_slave_disable_xfer(mdata);
277 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
278
279 return ret;
280}
281
282static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
283 struct spi_device *spi,
284 struct spi_transfer *xfer)
285{
286 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
287
288 reinit_completion(&mdata->xfer_done);
289 mdata->slave_aborted = false;
290 mdata->cur_transfer = xfer;
291
292 if (xfer->len > mdata->dev_comp->max_fifo_size)
293 return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
294 else
295 return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
296}
297
298static int mtk_spi_slave_setup(struct spi_device *spi)
299{
300 struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
301 u32 reg_val;
302
303 reg_val = DMA_DONE_EN | DATA_DONE_EN |
304 RSTA_DONE_EN | CMD_INVALID_EN;
305 writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);
306
307 reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
308 RSTA_DONE_MASK | CMD_INVALID_MASK;
309 writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);
310
311 mtk_spi_slave_disable_dma(mdata);
312 mtk_spi_slave_disable_xfer(mdata);
313
314 return 0;
315}
316
317static int mtk_slave_abort(struct spi_controller *ctlr)
318{
319 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
320
321 mdata->slave_aborted = true;
322 complete(&mdata->xfer_done);
323
324 return 0;
325}
326
327static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
328{
329 struct spi_controller *ctlr = dev_id;
330 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
331 struct spi_transfer *trans = mdata->cur_transfer;
332 u32 int_status, reg_val, cnt, remainder;
333
334 int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
335 writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);
336
337 if (!trans)
338 return IRQ_NONE;
339
340 if ((int_status & DMA_DONE_ST) &&
341 ((int_status & DATA_DONE_ST) ||
342 (int_status & RSTA_DONE_ST))) {
343 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
344
345 if (trans->tx_buf)
346 dma_unmap_single(mdata->dev, trans->tx_dma,
347 trans->len, DMA_TO_DEVICE);
348 if (trans->rx_buf)
349 dma_unmap_single(mdata->dev, trans->rx_dma,
350 trans->len, DMA_FROM_DEVICE);
351
352 mtk_spi_slave_disable_dma(mdata);
353 mtk_spi_slave_disable_xfer(mdata);
354 }
355
356 if ((!(int_status & DMA_DONE_ST)) &&
357 ((int_status & DATA_DONE_ST) ||
358 (int_status & RSTA_DONE_ST))) {
359 cnt = trans->len / 4;
360 if (trans->rx_buf)
361 ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
362 trans->rx_buf, cnt);
363 remainder = trans->len % 4;
364 if (trans->rx_buf && remainder > 0) {
365 reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
366 memcpy(trans->rx_buf + (cnt * 4),
367 ®_val, remainder);
368 }
369
370 mtk_spi_slave_disable_xfer(mdata);
371 }
372
373 if (int_status & CMD_INVALID_ST) {
374 dev_warn(&ctlr->dev, "cmd invalid\n");
375 return IRQ_NONE;
376 }
377
378 mdata->cur_transfer = NULL;
379 complete(&mdata->xfer_done);
380
381 return IRQ_HANDLED;
382}
383
384static int mtk_spi_slave_probe(struct platform_device *pdev)
385{
386 struct spi_controller *ctlr;
387 struct mtk_spi_slave *mdata;
388 int irq, ret;
389 const struct of_device_id *of_id;
390
391 ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
392 if (!ctlr) {
393 dev_err(&pdev->dev, "failed to alloc spi slave\n");
394 return -ENOMEM;
395 }
396
397 ctlr->auto_runtime_pm = true;
398 ctlr->dev.of_node = pdev->dev.of_node;
399 ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
400 ctlr->mode_bits |= SPI_LSB_FIRST;
401
402 ctlr->prepare_message = mtk_spi_slave_prepare_message;
403 ctlr->transfer_one = mtk_spi_slave_transfer_one;
404 ctlr->setup = mtk_spi_slave_setup;
405 ctlr->slave_abort = mtk_slave_abort;
406
407 of_id = of_match_node(mtk_spi_slave_of_match, pdev->dev.of_node);
408 if (!of_id) {
409 dev_err(&pdev->dev, "failed to probe of_node\n");
410 ret = -EINVAL;
411 goto err_put_ctlr;
412 }
413 mdata = spi_controller_get_devdata(ctlr);
414 mdata->dev_comp = of_id->data;
415
416 if (mdata->dev_comp->must_rx)
417 ctlr->flags = SPI_MASTER_MUST_RX;
418
419 platform_set_drvdata(pdev, ctlr);
420
421 init_completion(&mdata->xfer_done);
422 mdata->dev = &pdev->dev;
423 mdata->base = devm_platform_ioremap_resource(pdev, 0);
424 if (IS_ERR(mdata->base)) {
425 ret = PTR_ERR(mdata->base);
426 goto err_put_ctlr;
427 }
428
429 irq = platform_get_irq(pdev, 0);
430 if (irq < 0) {
431 ret = irq;
432 goto err_put_ctlr;
433 }
434
435 ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
436 IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
437 if (ret) {
438 dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
439 goto err_put_ctlr;
440 }
441
442 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
443 if (IS_ERR(mdata->spi_clk)) {
444 ret = PTR_ERR(mdata->spi_clk);
445 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
446 goto err_put_ctlr;
447 }
448
449 ret = clk_prepare_enable(mdata->spi_clk);
450 if (ret < 0) {
451 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
452 goto err_put_ctlr;
453 }
454
455 pm_runtime_enable(&pdev->dev);
456
457 ret = devm_spi_register_controller(&pdev->dev, ctlr);
458 if (ret) {
459 dev_err(&pdev->dev,
460 "failed to register slave controller(%d)\n", ret);
461 clk_disable_unprepare(mdata->spi_clk);
462 goto err_disable_runtime_pm;
463 }
464
465 clk_disable_unprepare(mdata->spi_clk);
466
467 return 0;
468
469err_disable_runtime_pm:
470 pm_runtime_disable(&pdev->dev);
471err_put_ctlr:
472 spi_controller_put(ctlr);
473
474 return ret;
475}
476
477static int mtk_spi_slave_remove(struct platform_device *pdev)
478{
479 pm_runtime_disable(&pdev->dev);
480
481 return 0;
482}
483
#ifdef CONFIG_PM_SLEEP
/* System sleep: quiesce the controller, then gate the clock. */
static int mtk_spi_slave_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = spi_controller_suspend(ctlr);
	if (ret)
		return ret;

	/* Clock is already off when we are runtime-suspended. */
	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

/* System resume: restore the clock state, then resume the controller. */
static int mtk_spi_slave_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	/* Only re-enable the clock if suspend had turned it off. */
	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_controller_resume(ctlr);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
/* Runtime suspend: just gate the functional clock. */
static int mtk_spi_slave_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

/* Runtime resume: re-enable the functional clock. */
static int mtk_spi_slave_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0)
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);

	return ret;
}
#endif /* CONFIG_PM */
549
550static const struct dev_pm_ops mtk_spi_slave_pm = {
551 SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
552 SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
553 mtk_spi_slave_runtime_resume, NULL)
554};
555
556static struct platform_driver mtk_spi_slave_driver = {
557 .driver = {
558 .name = "mtk-spi-slave",
559 .pm = &mtk_spi_slave_pm,
560 .of_match_table = mtk_spi_slave_of_match,
561 },
562 .probe = mtk_spi_slave_probe,
563 .remove = mtk_spi_slave_remove,
564};
565
566module_platform_driver(mtk_spi_slave_driver);
567
568MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
569MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
570MODULE_LICENSE("GPL v2");
571MODULE_ALIAS("platform:mtk-spi-slave");
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2018 MediaTek Inc.
3
4#include <linux/clk.h>
5#include <linux/device.h>
6#include <linux/dma-mapping.h>
7#include <linux/err.h>
8#include <linux/interrupt.h>
9#include <linux/module.h>
10#include <linux/platform_device.h>
11#include <linux/pm_runtime.h>
12#include <linux/spi/spi.h>
13
14#define SPIS_IRQ_EN_REG 0x0
15#define SPIS_IRQ_CLR_REG 0x4
16#define SPIS_IRQ_ST_REG 0x8
17#define SPIS_IRQ_MASK_REG 0xc
18#define SPIS_CFG_REG 0x10
19#define SPIS_RX_DATA_REG 0x14
20#define SPIS_TX_DATA_REG 0x18
21#define SPIS_RX_DST_REG 0x1c
22#define SPIS_TX_SRC_REG 0x20
23#define SPIS_DMA_CFG_REG 0x30
24#define SPIS_SOFT_RST_REG 0x40
25
26/* SPIS_IRQ_EN_REG */
27#define DMA_DONE_EN BIT(7)
28#define DATA_DONE_EN BIT(2)
29#define RSTA_DONE_EN BIT(1)
30#define CMD_INVALID_EN BIT(0)
31
32/* SPIS_IRQ_ST_REG */
33#define DMA_DONE_ST BIT(7)
34#define DATA_DONE_ST BIT(2)
35#define RSTA_DONE_ST BIT(1)
36#define CMD_INVALID_ST BIT(0)
37
38/* SPIS_IRQ_MASK_REG */
39#define DMA_DONE_MASK BIT(7)
40#define DATA_DONE_MASK BIT(2)
41#define RSTA_DONE_MASK BIT(1)
42#define CMD_INVALID_MASK BIT(0)
43
44/* SPIS_CFG_REG */
45#define SPIS_TX_ENDIAN BIT(7)
46#define SPIS_RX_ENDIAN BIT(6)
47#define SPIS_TXMSBF BIT(5)
48#define SPIS_RXMSBF BIT(4)
49#define SPIS_CPHA BIT(3)
50#define SPIS_CPOL BIT(2)
51#define SPIS_TX_EN BIT(1)
52#define SPIS_RX_EN BIT(0)
53
54/* SPIS_DMA_CFG_REG */
55#define TX_DMA_TRIG_EN BIT(31)
56#define TX_DMA_EN BIT(30)
57#define RX_DMA_EN BIT(29)
58#define TX_DMA_LEN 0xfffff
59
60/* SPIS_SOFT_RST_REG */
61#define SPIS_DMA_ADDR_EN BIT(1)
62#define SPIS_SOFT_RST BIT(0)
63
64#define MTK_SPI_SLAVE_MAX_FIFO_SIZE 512U
65
66struct mtk_spi_slave {
67 struct device *dev;
68 void __iomem *base;
69 struct clk *spi_clk;
70 struct completion xfer_done;
71 struct spi_transfer *cur_transfer;
72 bool slave_aborted;
73};
74
75static const struct of_device_id mtk_spi_slave_of_match[] = {
76 { .compatible = "mediatek,mt2712-spi-slave", },
77 {}
78};
79MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);
80
81static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
82{
83 u32 reg_val;
84
85 reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
86 reg_val &= ~RX_DMA_EN;
87 reg_val &= ~TX_DMA_EN;
88 writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
89}
90
91static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
92{
93 u32 reg_val;
94
95 reg_val = readl(mdata->base + SPIS_CFG_REG);
96 reg_val &= ~SPIS_TX_EN;
97 reg_val &= ~SPIS_RX_EN;
98 writel(reg_val, mdata->base + SPIS_CFG_REG);
99}
100
101static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
102{
103 if (wait_for_completion_interruptible(&mdata->xfer_done) ||
104 mdata->slave_aborted) {
105 dev_err(mdata->dev, "interrupted\n");
106 return -EINTR;
107 }
108
109 return 0;
110}
111
112static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
113 struct spi_message *msg)
114{
115 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
116 struct spi_device *spi = msg->spi;
117 bool cpha, cpol;
118 u32 reg_val;
119
120 cpha = spi->mode & SPI_CPHA ? 1 : 0;
121 cpol = spi->mode & SPI_CPOL ? 1 : 0;
122
123 reg_val = readl(mdata->base + SPIS_CFG_REG);
124 if (cpha)
125 reg_val |= SPIS_CPHA;
126 else
127 reg_val &= ~SPIS_CPHA;
128 if (cpol)
129 reg_val |= SPIS_CPOL;
130 else
131 reg_val &= ~SPIS_CPOL;
132
133 if (spi->mode & SPI_LSB_FIRST)
134 reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
135 else
136 reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;
137
138 reg_val &= ~SPIS_TX_ENDIAN;
139 reg_val &= ~SPIS_RX_ENDIAN;
140 writel(reg_val, mdata->base + SPIS_CFG_REG);
141
142 return 0;
143}
144
145static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
146 struct spi_device *spi,
147 struct spi_transfer *xfer)
148{
149 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
150 int reg_val, cnt, remainder, ret;
151
152 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
153
154 reg_val = readl(mdata->base + SPIS_CFG_REG);
155 if (xfer->rx_buf)
156 reg_val |= SPIS_RX_EN;
157 if (xfer->tx_buf)
158 reg_val |= SPIS_TX_EN;
159 writel(reg_val, mdata->base + SPIS_CFG_REG);
160
161 cnt = xfer->len / 4;
162 if (xfer->tx_buf)
163 iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
164 xfer->tx_buf, cnt);
165
166 remainder = xfer->len % 4;
167 if (xfer->tx_buf && remainder > 0) {
168 reg_val = 0;
169 memcpy(®_val, xfer->tx_buf + cnt * 4, remainder);
170 writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
171 }
172
173 ret = mtk_spi_slave_wait_for_completion(mdata);
174 if (ret) {
175 mtk_spi_slave_disable_xfer(mdata);
176 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
177 }
178
179 return ret;
180}
181
182static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
183 struct spi_device *spi,
184 struct spi_transfer *xfer)
185{
186 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
187 struct device *dev = mdata->dev;
188 int reg_val, ret;
189
190 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
191
192 if (xfer->tx_buf) {
193 /* tx_buf is a const void* where we need a void * for
194 * the dma mapping
195 */
196 void *nonconst_tx = (void *)xfer->tx_buf;
197
198 xfer->tx_dma = dma_map_single(dev, nonconst_tx,
199 xfer->len, DMA_TO_DEVICE);
200 if (dma_mapping_error(dev, xfer->tx_dma)) {
201 ret = -ENOMEM;
202 goto disable_transfer;
203 }
204 }
205
206 if (xfer->rx_buf) {
207 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
208 xfer->len, DMA_FROM_DEVICE);
209 if (dma_mapping_error(dev, xfer->rx_dma)) {
210 ret = -ENOMEM;
211 goto unmap_txdma;
212 }
213 }
214
215 writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
216 writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);
217
218 writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);
219
220 /* enable config reg tx rx_enable */
221 reg_val = readl(mdata->base + SPIS_CFG_REG);
222 if (xfer->tx_buf)
223 reg_val |= SPIS_TX_EN;
224 if (xfer->rx_buf)
225 reg_val |= SPIS_RX_EN;
226 writel(reg_val, mdata->base + SPIS_CFG_REG);
227
228 /* config dma */
229 reg_val = 0;
230 reg_val |= (xfer->len - 1) & TX_DMA_LEN;
231 writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
232
233 reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
234 if (xfer->tx_buf)
235 reg_val |= TX_DMA_EN;
236 if (xfer->rx_buf)
237 reg_val |= RX_DMA_EN;
238 reg_val |= TX_DMA_TRIG_EN;
239 writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
240
241 ret = mtk_spi_slave_wait_for_completion(mdata);
242 if (ret)
243 goto unmap_rxdma;
244
245 return 0;
246
247unmap_rxdma:
248 if (xfer->rx_buf)
249 dma_unmap_single(dev, xfer->rx_dma,
250 xfer->len, DMA_FROM_DEVICE);
251
252unmap_txdma:
253 if (xfer->tx_buf)
254 dma_unmap_single(dev, xfer->tx_dma,
255 xfer->len, DMA_TO_DEVICE);
256
257disable_transfer:
258 mtk_spi_slave_disable_dma(mdata);
259 mtk_spi_slave_disable_xfer(mdata);
260 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
261
262 return ret;
263}
264
265static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
266 struct spi_device *spi,
267 struct spi_transfer *xfer)
268{
269 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
270
271 reinit_completion(&mdata->xfer_done);
272 mdata->slave_aborted = false;
273 mdata->cur_transfer = xfer;
274
275 if (xfer->len > MTK_SPI_SLAVE_MAX_FIFO_SIZE)
276 return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
277 else
278 return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
279}
280
281static int mtk_spi_slave_setup(struct spi_device *spi)
282{
283 struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
284 u32 reg_val;
285
286 reg_val = DMA_DONE_EN | DATA_DONE_EN |
287 RSTA_DONE_EN | CMD_INVALID_EN;
288 writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);
289
290 reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
291 RSTA_DONE_MASK | CMD_INVALID_MASK;
292 writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);
293
294 mtk_spi_slave_disable_dma(mdata);
295 mtk_spi_slave_disable_xfer(mdata);
296
297 return 0;
298}
299
300static int mtk_slave_abort(struct spi_controller *ctlr)
301{
302 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
303
304 mdata->slave_aborted = true;
305 complete(&mdata->xfer_done);
306
307 return 0;
308}
309
310static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
311{
312 struct spi_controller *ctlr = dev_id;
313 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
314 struct spi_transfer *trans = mdata->cur_transfer;
315 u32 int_status, reg_val, cnt, remainder;
316
317 int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
318 writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);
319
320 if (!trans)
321 return IRQ_NONE;
322
323 if ((int_status & DMA_DONE_ST) &&
324 ((int_status & DATA_DONE_ST) ||
325 (int_status & RSTA_DONE_ST))) {
326 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
327
328 if (trans->tx_buf)
329 dma_unmap_single(mdata->dev, trans->tx_dma,
330 trans->len, DMA_TO_DEVICE);
331 if (trans->rx_buf)
332 dma_unmap_single(mdata->dev, trans->rx_dma,
333 trans->len, DMA_FROM_DEVICE);
334
335 mtk_spi_slave_disable_dma(mdata);
336 mtk_spi_slave_disable_xfer(mdata);
337 }
338
339 if ((!(int_status & DMA_DONE_ST)) &&
340 ((int_status & DATA_DONE_ST) ||
341 (int_status & RSTA_DONE_ST))) {
342 cnt = trans->len / 4;
343 if (trans->rx_buf)
344 ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
345 trans->rx_buf, cnt);
346 remainder = trans->len % 4;
347 if (trans->rx_buf && remainder > 0) {
348 reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
349 memcpy(trans->rx_buf + (cnt * 4),
350 ®_val, remainder);
351 }
352
353 mtk_spi_slave_disable_xfer(mdata);
354 }
355
356 if (int_status & CMD_INVALID_ST) {
357 dev_warn(&ctlr->dev, "cmd invalid\n");
358 return IRQ_NONE;
359 }
360
361 mdata->cur_transfer = NULL;
362 complete(&mdata->xfer_done);
363
364 return IRQ_HANDLED;
365}
366
367static int mtk_spi_slave_probe(struct platform_device *pdev)
368{
369 struct spi_controller *ctlr;
370 struct mtk_spi_slave *mdata;
371 struct resource *res;
372 int irq, ret;
373
374 ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
375 if (!ctlr) {
376 dev_err(&pdev->dev, "failed to alloc spi slave\n");
377 return -ENOMEM;
378 }
379
380 ctlr->auto_runtime_pm = true;
381 ctlr->dev.of_node = pdev->dev.of_node;
382 ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
383 ctlr->mode_bits |= SPI_LSB_FIRST;
384
385 ctlr->prepare_message = mtk_spi_slave_prepare_message;
386 ctlr->transfer_one = mtk_spi_slave_transfer_one;
387 ctlr->setup = mtk_spi_slave_setup;
388 ctlr->slave_abort = mtk_slave_abort;
389
390 mdata = spi_controller_get_devdata(ctlr);
391
392 platform_set_drvdata(pdev, ctlr);
393
394 init_completion(&mdata->xfer_done);
395
396 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
397 if (!res) {
398 ret = -ENODEV;
399 dev_err(&pdev->dev, "failed to determine base address\n");
400 goto err_put_ctlr;
401 }
402
403 mdata->dev = &pdev->dev;
404
405 mdata->base = devm_ioremap_resource(&pdev->dev, res);
406 if (IS_ERR(mdata->base)) {
407 ret = PTR_ERR(mdata->base);
408 goto err_put_ctlr;
409 }
410
411 irq = platform_get_irq(pdev, 0);
412 if (irq < 0) {
413 ret = irq;
414 goto err_put_ctlr;
415 }
416
417 ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
418 IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
419 if (ret) {
420 dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
421 goto err_put_ctlr;
422 }
423
424 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
425 if (IS_ERR(mdata->spi_clk)) {
426 ret = PTR_ERR(mdata->spi_clk);
427 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
428 goto err_put_ctlr;
429 }
430
431 ret = clk_prepare_enable(mdata->spi_clk);
432 if (ret < 0) {
433 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
434 goto err_put_ctlr;
435 }
436
437 pm_runtime_enable(&pdev->dev);
438
439 ret = devm_spi_register_controller(&pdev->dev, ctlr);
440 if (ret) {
441 dev_err(&pdev->dev,
442 "failed to register slave controller(%d)\n", ret);
443 clk_disable_unprepare(mdata->spi_clk);
444 goto err_disable_runtime_pm;
445 }
446
447 clk_disable_unprepare(mdata->spi_clk);
448
449 return 0;
450
451err_disable_runtime_pm:
452 pm_runtime_disable(&pdev->dev);
453err_put_ctlr:
454 spi_controller_put(ctlr);
455
456 return ret;
457}
458
459static int mtk_spi_slave_remove(struct platform_device *pdev)
460{
461 pm_runtime_disable(&pdev->dev);
462
463 return 0;
464}
465
#ifdef CONFIG_PM_SLEEP
/* System sleep: quiesce the controller, then gate the clock. */
static int mtk_spi_slave_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = spi_controller_suspend(ctlr);
	if (ret)
		return ret;

	/* Clock is already off when we are runtime-suspended. */
	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

/* System resume: restore the clock state, then resume the controller. */
static int mtk_spi_slave_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	/* Only re-enable the clock if suspend had turned it off. */
	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_controller_resume(ctlr);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
/* Runtime suspend: just gate the functional clock. */
static int mtk_spi_slave_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

/* Runtime resume: re-enable the functional clock. */
static int mtk_spi_slave_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0)
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);

	return ret;
}
#endif /* CONFIG_PM */
531
532static const struct dev_pm_ops mtk_spi_slave_pm = {
533 SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
534 SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
535 mtk_spi_slave_runtime_resume, NULL)
536};
537
538static struct platform_driver mtk_spi_slave_driver = {
539 .driver = {
540 .name = "mtk-spi-slave",
541 .pm = &mtk_spi_slave_pm,
542 .of_match_table = mtk_spi_slave_of_match,
543 },
544 .probe = mtk_spi_slave_probe,
545 .remove = mtk_spi_slave_remove,
546};
547
548module_platform_driver(mtk_spi_slave_driver);
549
550MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
551MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
552MODULE_LICENSE("GPL v2");
553MODULE_ALIAS("platform:mtk-spi-slave");