/* NOTE(review): stray "Loading..." page artifact from the source scrape — not part of the driver */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * SPI-Engine SPI controller driver
4 * Copyright 2015 Analog Devices Inc.
5 * Author: Lars-Peter Clausen <lars@metafoo.de>
6 */
7
8#include <linux/clk.h>
9#include <linux/completion.h>
10#include <linux/fpga/adi-axi-common.h>
11#include <linux/interrupt.h>
12#include <linux/io.h>
13#include <linux/of.h>
14#include <linux/module.h>
15#include <linux/overflow.h>
16#include <linux/platform_device.h>
17#include <linux/spi/spi.h>
18#include <trace/events/spi.h>
19
20#define SPI_ENGINE_REG_RESET 0x40
21
22#define SPI_ENGINE_REG_INT_ENABLE 0x80
23#define SPI_ENGINE_REG_INT_PENDING 0x84
24#define SPI_ENGINE_REG_INT_SOURCE 0x88
25
26#define SPI_ENGINE_REG_SYNC_ID 0xc0
27
28#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
29#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
30#define SPI_ENGINE_REG_SDI_FIFO_LEVEL 0xd8
31
32#define SPI_ENGINE_REG_CMD_FIFO 0xe0
33#define SPI_ENGINE_REG_SDO_DATA_FIFO 0xe4
34#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
35#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
36
37#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
38#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
39#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
40#define SPI_ENGINE_INT_SYNC BIT(3)
41
42#define SPI_ENGINE_CONFIG_CPHA BIT(0)
43#define SPI_ENGINE_CONFIG_CPOL BIT(1)
44#define SPI_ENGINE_CONFIG_3WIRE BIT(2)
45#define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH BIT(3)
46
47#define SPI_ENGINE_INST_TRANSFER 0x0
48#define SPI_ENGINE_INST_ASSERT 0x1
49#define SPI_ENGINE_INST_WRITE 0x2
50#define SPI_ENGINE_INST_MISC 0x3
51#define SPI_ENGINE_INST_CS_INV 0x4
52
53#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
54#define SPI_ENGINE_CMD_REG_CONFIG 0x1
55#define SPI_ENGINE_CMD_REG_XFER_BITS 0x2
56
57#define SPI_ENGINE_MISC_SYNC 0x0
58#define SPI_ENGINE_MISC_SLEEP 0x1
59
60#define SPI_ENGINE_TRANSFER_WRITE 0x1
61#define SPI_ENGINE_TRANSFER_READ 0x2
62
63/* Arbitrary sync ID for use by host->cur_msg */
64#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID 0x1
65
66#define SPI_ENGINE_CMD(inst, arg1, arg2) \
67 (((inst) << 12) | ((arg1) << 8) | (arg2))
68
69#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
70 SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
71#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
72 SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
73#define SPI_ENGINE_CMD_WRITE(reg, val) \
74 SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
75#define SPI_ENGINE_CMD_SLEEP(delay) \
76 SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
77#define SPI_ENGINE_CMD_SYNC(id) \
78 SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
79#define SPI_ENGINE_CMD_CS_INV(flags) \
80 SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
81
/*
 * struct spi_engine_program - compiled SPI engine command stream
 * @length: number of 16-bit command words in @instructions
 * @instructions: raw command words to be written to the CMD FIFO
 */
struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
};
86
/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully written to RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};
108
/*
 * struct spi_engine - driver instance state
 */
struct spi_engine {
	struct clk *clk;	/* AXI bus clock ("s_axi_aclk") */
	struct clk *ref_clk;	/* SPI reference clock ("spi_clk") */

	/* protects FIFO access and the int_enable shadow */
	spinlock_t lock;

	void __iomem *base;	/* memory-mapped register base */
	struct spi_engine_message_state msg_state;
	struct completion msg_complete;
	unsigned int int_enable;	/* shadow of the INT_ENABLE register */
	/* shadows hardware CS inversion flag state */
	u8 cs_inv;
};
122
123static void spi_engine_program_add_cmd(struct spi_engine_program *p,
124 bool dry, uint16_t cmd)
125{
126 p->length++;
127
128 if (!dry)
129 p->instructions[p->length - 1] = cmd;
130}
131
132static unsigned int spi_engine_get_config(struct spi_device *spi)
133{
134 unsigned int config = 0;
135
136 if (spi->mode & SPI_CPOL)
137 config |= SPI_ENGINE_CONFIG_CPOL;
138 if (spi->mode & SPI_CPHA)
139 config |= SPI_ENGINE_CONFIG_CPHA;
140 if (spi->mode & SPI_3WIRE)
141 config |= SPI_ENGINE_CONFIG_3WIRE;
142 if (spi->mode & SPI_MOSI_IDLE_HIGH)
143 config |= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
144 if (spi->mode & SPI_MOSI_IDLE_LOW)
145 config &= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
146
147 return config;
148}
149
150static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
151 struct spi_transfer *xfer)
152{
153 unsigned int len;
154
155 if (xfer->bits_per_word <= 8)
156 len = xfer->len;
157 else if (xfer->bits_per_word <= 16)
158 len = xfer->len / 2;
159 else
160 len = xfer->len / 4;
161
162 while (len) {
163 unsigned int n = min(len, 256U);
164 unsigned int flags = 0;
165
166 if (xfer->tx_buf)
167 flags |= SPI_ENGINE_TRANSFER_WRITE;
168 if (xfer->rx_buf)
169 flags |= SPI_ENGINE_TRANSFER_READ;
170
171 spi_engine_program_add_cmd(p, dry,
172 SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
173 len -= n;
174 }
175}
176
/*
 * Emit SLEEP instructions implementing a delay of @delay_ns at SCLK rate
 * @sclk_hz. @inst_ns is the execution time of one instruction and is
 * already "spent", so it is subtracted from the requested delay.
 */
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
				 int delay_ns, int inst_ns, u32 sclk_hz)
{
	unsigned int t;

	/*
	 * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if
	 * delay is less that the instruction execution time, there is no need
	 * for an extra sleep instruction since the instruction execution time
	 * will already cover the required delay.
	 */
	if (delay_ns < 0 || delay_ns <= inst_ns)
		return;

	/* convert the remaining delay to SCLK cycles, rounding up */
	t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
	while (t) {
		/* one SLEEP instruction covers at most 256 cycles (n - 1 encoding) */
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}
199
200static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
201 struct spi_device *spi, bool assert)
202{
203 unsigned int mask = 0xff;
204
205 if (assert)
206 mask ^= BIT(spi_get_chipselect(spi, 0));
207
208 spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
209}
210
/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining not
 * done there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 */
static void spi_engine_precompile_message(struct spi_message *msg)
{
	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* the hardware divider is limited to 256 steps */
		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
	}
}
231
/*
 * Emit the full instruction stream for @msg into @p. Called twice per
 * message: once with dry=true to count instructions, then with dry=false
 * to record them — so it must produce the same sequence both times.
 */
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
				       struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_controller *host = spi->controller;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div, inst_ns;
	bool keep_cs = false;
	u8 bits_per_word = 0;

	/*
	 * Take into account instruction execution time for more accurate sleep
	 * times, especially when the delay is small.
	 */
	inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

	clk_div = 1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	/* assert CS before the first transfer unless it opts out via cs_off */
	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* only reprogram the divider when it changes */
		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			/* actual divider used is register value + 1 */
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div - 1));
		}

		/* only reprogram the word size when it changes */
		if (bits_per_word != xfer->bits_per_word && xfer->len) {
			bits_per_word = xfer->bits_per_word;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
					bits_per_word));
		}

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
				     inst_ns, xfer->effective_speed_hz);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
				/* cs_change on the last xfer means leave CS asserted */
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_engine_gen_cs(p, dry, spi, false);

				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
					&xfer->cs_change_delay, xfer), inst_ns,
					xfer->effective_speed_hz);

				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_engine_gen_cs(p, dry, spi, true);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			/* adjacent transfers disagree on cs_off: toggle CS between them */
			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
		}
	}

	if (!keep_cs)
		spi_engine_gen_cs(p, dry, spi, false);

	/*
	 * Restore clockdiv to default so that future gen_sleep commands don't
	 * have to be aware of the current register state.
	 */
	if (clk_div != 1)
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}
309
310static void spi_engine_xfer_next(struct spi_message *msg,
311 struct spi_transfer **_xfer)
312{
313 struct spi_transfer *xfer = *_xfer;
314
315 if (!xfer) {
316 xfer = list_first_entry(&msg->transfers,
317 struct spi_transfer, transfer_list);
318 } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
319 xfer = NULL;
320 } else {
321 xfer = list_next_entry(xfer, transfer_list);
322 }
323
324 *_xfer = xfer;
325}
326
327static void spi_engine_tx_next(struct spi_message *msg)
328{
329 struct spi_engine_message_state *st = msg->state;
330 struct spi_transfer *xfer = st->tx_xfer;
331
332 do {
333 spi_engine_xfer_next(msg, &xfer);
334 } while (xfer && !xfer->tx_buf);
335
336 st->tx_xfer = xfer;
337 if (xfer) {
338 st->tx_length = xfer->len;
339 st->tx_buf = xfer->tx_buf;
340 } else {
341 st->tx_buf = NULL;
342 }
343}
344
345static void spi_engine_rx_next(struct spi_message *msg)
346{
347 struct spi_engine_message_state *st = msg->state;
348 struct spi_transfer *xfer = st->rx_xfer;
349
350 do {
351 spi_engine_xfer_next(msg, &xfer);
352 } while (xfer && !xfer->rx_buf);
353
354 st->rx_xfer = xfer;
355 if (xfer) {
356 st->rx_length = xfer->len;
357 st->rx_buf = xfer->rx_buf;
358 } else {
359 st->rx_buf = NULL;
360 }
361}
362
/*
 * Push as many pending command words into the CMD FIFO as there is room
 * for. Returns true while commands remain outstanding (caller keeps the
 * CMD almost-empty interrupt enabled).
 */
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
				      struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;
	const uint16_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && st->cmd_length) {
		m = min(n, st->cmd_length);
		buf = st->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->cmd_buf += m;
		st->cmd_length -= m;
		n -= m;
	}

	return st->cmd_length != 0;
}
384
/*
 * Fill the SDO FIFO from the current tx buffer(s). One FIFO write carries
 * one SPI word; the buffer pointer is stepped by 1, 2 or 4 bytes depending
 * on the transfer's bits_per_word. Returns true while tx data remains.
 */
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
				     struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		if (st->tx_xfer->bits_per_word <= 8) {
			const u8 *buf = st->tx_buf;

			m = min(n, st->tx_length);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m;
			st->tx_length -= m;
		} else if (st->tx_xfer->bits_per_word <= 16) {
			const u16 *buf = (const u16 *)st->tx_buf;

			m = min(n, st->tx_length / 2);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 2;
			st->tx_length -= m * 2;
		} else {
			const u32 *buf = (const u32 *)st->tx_buf;

			m = min(n, st->tx_length / 4);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 4;
			st->tx_length -= m * 4;
		}
		n -= m;
		/* current xfer exhausted: advance to the next one with tx data */
		if (st->tx_length == 0)
			spi_engine_tx_next(msg);
	}

	return st->tx_length != 0;
}
426
/*
 * Drain the SDI FIFO into the current rx buffer(s). One FIFO read yields
 * one SPI word; the buffer pointer is stepped by 1, 2 or 4 bytes depending
 * on the transfer's bits_per_word. Returns true while rx data is expected.
 */
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
				    struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		if (st->rx_xfer->bits_per_word <= 8) {
			u8 *buf = st->rx_buf;

			m = min(n, st->rx_length);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m;
			st->rx_length -= m;
		} else if (st->rx_xfer->bits_per_word <= 16) {
			u16 *buf = (u16 *)st->rx_buf;

			m = min(n, st->rx_length / 2);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 2;
			st->rx_length -= m * 2;
		} else {
			u32 *buf = (u32 *)st->rx_buf;

			m = min(n, st->rx_length / 4);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 4;
			st->rx_length -= m * 4;
		}
		n -= m;
		/* current xfer filled: advance to the next one expecting rx data */
		if (st->rx_length == 0)
			spi_engine_rx_next(msg);
	}

	return st->rx_length != 0;
}
468
/*
 * Interrupt handler: services the CMD/SDO/SDI FIFOs and completes the
 * current message once its trailing SYNC instruction has retired.
 */
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_message *msg = host->cur_msg;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;
	int completed_id = -1;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	if (pending & SPI_ENGINE_INT_SYNC) {
		/* ack SYNC and latch which sync ID just completed */
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	/* on SYNC also drain any RX residue below the almost-full threshold */
	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	if (pending & SPI_ENGINE_INT_SYNC && msg) {
		if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			complete(&spi_engine->msg_complete);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}
523
524static int spi_engine_optimize_message(struct spi_message *msg)
525{
526 struct spi_engine_program p_dry, *p;
527
528 spi_engine_precompile_message(msg);
529
530 p_dry.length = 0;
531 spi_engine_compile_message(msg, true, &p_dry);
532
533 p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
534 if (!p)
535 return -ENOMEM;
536
537 spi_engine_compile_message(msg, false, p);
538
539 spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
540 AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
541
542 msg->opt_state = p;
543
544 return 0;
545}
546
547static int spi_engine_unoptimize_message(struct spi_message *msg)
548{
549 kfree(msg->opt_state);
550
551 return 0;
552}
553
/*
 * Per-device setup: program the hardware CS polarity inversion flags.
 * Only wired up as host->setup for IP cores >= v1.2 (see probe).
 */
static int spi_engine_setup(struct spi_device *device)
{
	struct spi_controller *host = device->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);

	if (device->mode & SPI_CS_HIGH)
		spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
	else
		spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));

	/* commands can be written directly to the CMD FIFO register */
	writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
		spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/*
	 * In addition to setting the flags, we have to do a CS assert command
	 * to make the new setting actually take effect.
	 */
	writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
		spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	return 0;
}
576
/*
 * Execute the precompiled program for @msg: prime the FIFOs, enable the
 * matching interrupts and wait for the IRQ handler to signal completion
 * of the message's SYNC instruction.
 */
static int spi_engine_transfer_one_message(struct spi_controller *host,
					   struct spi_message *msg)
{
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st = &spi_engine->msg_state;
	struct spi_engine_program *p = msg->opt_state;
	unsigned int int_enable = 0;
	unsigned long flags;

	/* reinitialize message state for this transfer */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	msg->state = st;

	reinit_completion(&spi_engine->msg_complete);

	if (trace_spi_transfer_start_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_start(msg, xfer);
	}

	spin_lock_irqsave(&spi_engine->lock, flags);

	/* only enable each FIFO interrupt if the initial fill didn't finish */
	if (spi_engine_write_cmd_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(msg);
	if (spi_engine_write_tx_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(msg);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	if (!wait_for_completion_timeout(&spi_engine->msg_complete,
					 msecs_to_jiffies(5000))) {
		dev_err(&host->dev,
			"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
		msg->status = -ETIMEDOUT;
	}

	if (trace_spi_transfer_stop_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_stop(msg, xfer);
	}

	spi_finalize_current_message(host);

	return msg->status;
}
639
/* devm action: ack/mask all interrupts and hold the IP core in reset */
static void spi_engine_release_hw(void *p)
{
	struct spi_engine *spi_engine = p;

	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}
648
/*
 * Probe: map registers, verify the IP core version, bring the core out of
 * reset, install the IRQ handler and register the SPI controller. All
 * resources are devm-managed; spi_engine_release_hw() undoes the reset.
 */
static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
	int irq;
	int ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	/* only major version 1 of the IP core is supported */
	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	/* release reset, then ack and mask all interrupts */
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
				       spi_engine);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
			       host);
	if (ret)
		return ret;

	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	/* SCLK is the reference clock divided by at least 2 */
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
	host->num_chipselect = 8;

	/* Some features depend of the IP core version. */
	if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) {
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
			host->mode_bits |= SPI_CS_HIGH;
			host->setup = spi_engine_setup;
		}
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 3)
			host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
	}

	if (host->max_speed_hz == 0)
		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

	return devm_spi_register_controller(&pdev->dev, host);
}
729
/* Device-tree match table */
static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * SPI-Engine SPI controller driver
4 * Copyright 2015 Analog Devices Inc.
5 * Author: Lars-Peter Clausen <lars@metafoo.de>
6 */
7
8#include <linux/clk.h>
9#include <linux/interrupt.h>
10#include <linux/io.h>
11#include <linux/of.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/spi/spi.h>
15
16#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff)
17#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff)
18#define SPI_ENGINE_VERSION_PATCH(x) (x & 0xff)
19
20#define SPI_ENGINE_REG_VERSION 0x00
21
22#define SPI_ENGINE_REG_RESET 0x40
23
24#define SPI_ENGINE_REG_INT_ENABLE 0x80
25#define SPI_ENGINE_REG_INT_PENDING 0x84
26#define SPI_ENGINE_REG_INT_SOURCE 0x88
27
28#define SPI_ENGINE_REG_SYNC_ID 0xc0
29
30#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
31#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
32#define SPI_ENGINE_REG_SDI_FIFO_LEVEL 0xd8
33
34#define SPI_ENGINE_REG_CMD_FIFO 0xe0
35#define SPI_ENGINE_REG_SDO_DATA_FIFO 0xe4
36#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
37#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
38
39#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
40#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
41#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
42#define SPI_ENGINE_INT_SYNC BIT(3)
43
44#define SPI_ENGINE_CONFIG_CPHA BIT(0)
45#define SPI_ENGINE_CONFIG_CPOL BIT(1)
46#define SPI_ENGINE_CONFIG_3WIRE BIT(2)
47
48#define SPI_ENGINE_INST_TRANSFER 0x0
49#define SPI_ENGINE_INST_ASSERT 0x1
50#define SPI_ENGINE_INST_WRITE 0x2
51#define SPI_ENGINE_INST_MISC 0x3
52
53#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
54#define SPI_ENGINE_CMD_REG_CONFIG 0x1
55
56#define SPI_ENGINE_MISC_SYNC 0x0
57#define SPI_ENGINE_MISC_SLEEP 0x1
58
59#define SPI_ENGINE_TRANSFER_WRITE 0x1
60#define SPI_ENGINE_TRANSFER_READ 0x2
61
62#define SPI_ENGINE_CMD(inst, arg1, arg2) \
63 (((inst) << 12) | ((arg1) << 8) | (arg2))
64
65#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
66 SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
67#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
68 SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
69#define SPI_ENGINE_CMD_WRITE(reg, val) \
70 SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
71#define SPI_ENGINE_CMD_SLEEP(delay) \
72 SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
73#define SPI_ENGINE_CMD_SYNC(id) \
74 SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
75
/*
 * struct spi_engine_program - compiled command stream (older revision of
 * this driver, duplicated later in this file)
 * @length: number of 16-bit command words in @instructions
 * @instructions: raw command words to be written to the CMD FIFO
 */
struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[];
};
80
/*
 * struct spi_engine - driver instance state (older revision: the state of
 * the message currently in flight is kept directly in this struct)
 */
struct spi_engine {
	struct clk *clk;	/* AXI bus clock */
	struct clk *ref_clk;	/* SPI reference clock */

	/* protects FIFO access and the fields below */
	spinlock_t lock;

	void __iomem *base;	/* memory-mapped register base */

	struct spi_message *msg;	/* message currently in flight */
	struct spi_engine_program *p;	/* compiled program for @msg */
	unsigned cmd_length;		/* command words left to push */
	const uint16_t *cmd_buf;

	struct spi_transfer *tx_xfer;
	unsigned int tx_length;
	const uint8_t *tx_buf;

	struct spi_transfer *rx_xfer;
	unsigned int rx_length;
	uint8_t *rx_buf;

	unsigned int sync_id;		/* sync ID assigned to @msg */
	unsigned int completed_id;	/* last sync ID read from hardware */

	unsigned int int_enable;	/* shadow of the INT_ENABLE register */
};
107
108static void spi_engine_program_add_cmd(struct spi_engine_program *p,
109 bool dry, uint16_t cmd)
110{
111 if (!dry)
112 p->instructions[p->length] = cmd;
113 p->length++;
114}
115
116static unsigned int spi_engine_get_config(struct spi_device *spi)
117{
118 unsigned int config = 0;
119
120 if (spi->mode & SPI_CPOL)
121 config |= SPI_ENGINE_CONFIG_CPOL;
122 if (spi->mode & SPI_CPHA)
123 config |= SPI_ENGINE_CONFIG_CPHA;
124 if (spi->mode & SPI_3WIRE)
125 config |= SPI_ENGINE_CONFIG_3WIRE;
126
127 return config;
128}
129
130static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
131 struct spi_device *spi, struct spi_transfer *xfer)
132{
133 unsigned int clk_div;
134
135 clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
136 xfer->speed_hz * 2);
137 if (clk_div > 255)
138 clk_div = 255;
139 else if (clk_div > 0)
140 clk_div -= 1;
141
142 return clk_div;
143}
144
145static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
146 struct spi_transfer *xfer)
147{
148 unsigned int len = xfer->len;
149
150 while (len) {
151 unsigned int n = min(len, 256U);
152 unsigned int flags = 0;
153
154 if (xfer->tx_buf)
155 flags |= SPI_ENGINE_TRANSFER_WRITE;
156 if (xfer->rx_buf)
157 flags |= SPI_ENGINE_TRANSFER_READ;
158
159 spi_engine_program_add_cmd(p, dry,
160 SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
161 len -= n;
162 }
163}
164
/*
 * Emit SLEEP instructions for the transfer's delay (older revision).
 *
 * NOTE(review): xfer->delay_usecs is the legacy delay field that newer
 * kernels removed in favor of xfer->delay; it takes precedence here.
 * NOTE(review): "delay * spi_clk" is a 32-bit multiply of microseconds by
 * the reference clock rate — looks prone to overflow for large delays or
 * fast clocks; verify the expected ranges and the SLEEP timebase scaling.
 */
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
	struct spi_engine *spi_engine, unsigned int clk_div,
	struct spi_transfer *xfer)
{
	unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
	unsigned int t;
	int delay;

	if (xfer->delay_usecs) {
		delay = xfer->delay_usecs;
	} else {
		delay = spi_delay_to_ns(&xfer->delay, xfer);
		/* negative means spi_delay_to_ns() reported an error */
		if (delay < 0)
			return;
		delay /= 1000;
	}

	if (delay == 0)
		return;

	t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
	while (t) {
		/* one SLEEP instruction covers at most 256 units (n - 1 encoding) */
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}
193
194static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
195 struct spi_device *spi, bool assert)
196{
197 unsigned int mask = 0xff;
198
199 if (assert)
200 mask ^= BIT(spi->chip_select);
201
202 spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
203}
204
/*
 * Older-revision message compiler: emits config, per-transfer clock
 * divider, CS assert/deassert, transfer and sleep instructions into @p.
 * Called with dry=true to count instructions and dry=false to record
 * them. Always returns 0.
 */
static int spi_engine_compile_message(struct spi_engine *spi_engine,
	struct spi_message *msg, bool dry, struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div;
	bool cs_change = true;

	/* -1 forces the first transfer to program the divider */
	clk_div = -1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div));
		}

		if (cs_change)
			spi_engine_gen_cs(p, dry, spi, true);

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer);

		cs_change = xfer->cs_change;
		/* after the last transfer the meaning of cs_change is inverted */
		if (list_is_last(&xfer->transfer_list, &msg->transfers))
			cs_change = !cs_change;

		if (cs_change)
			spi_engine_gen_cs(p, dry, spi, false);
	}

	return 0;
}
244
245static void spi_engine_xfer_next(struct spi_engine *spi_engine,
246 struct spi_transfer **_xfer)
247{
248 struct spi_message *msg = spi_engine->msg;
249 struct spi_transfer *xfer = *_xfer;
250
251 if (!xfer) {
252 xfer = list_first_entry(&msg->transfers,
253 struct spi_transfer, transfer_list);
254 } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
255 xfer = NULL;
256 } else {
257 xfer = list_next_entry(xfer, transfer_list);
258 }
259
260 *_xfer = xfer;
261}
262
263static void spi_engine_tx_next(struct spi_engine *spi_engine)
264{
265 struct spi_transfer *xfer = spi_engine->tx_xfer;
266
267 do {
268 spi_engine_xfer_next(spi_engine, &xfer);
269 } while (xfer && !xfer->tx_buf);
270
271 spi_engine->tx_xfer = xfer;
272 if (xfer) {
273 spi_engine->tx_length = xfer->len;
274 spi_engine->tx_buf = xfer->tx_buf;
275 } else {
276 spi_engine->tx_buf = NULL;
277 }
278}
279
280static void spi_engine_rx_next(struct spi_engine *spi_engine)
281{
282 struct spi_transfer *xfer = spi_engine->rx_xfer;
283
284 do {
285 spi_engine_xfer_next(spi_engine, &xfer);
286 } while (xfer && !xfer->rx_buf);
287
288 spi_engine->rx_xfer = xfer;
289 if (xfer) {
290 spi_engine->rx_length = xfer->len;
291 spi_engine->rx_buf = xfer->rx_buf;
292 } else {
293 spi_engine->rx_buf = NULL;
294 }
295}
296
/*
 * Push pending command words into the CMD FIFO while there is room.
 * Returns true while commands remain outstanding.
 */
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	unsigned int n, m, i;
	const uint16_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && spi_engine->cmd_length) {
		m = min(n, spi_engine->cmd_length);
		buf = spi_engine->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		spi_engine->cmd_buf += m;
		spi_engine->cmd_length -= m;
		n -= m;
	}

	return spi_engine->cmd_length != 0;
}
316
/*
 * Fill the SDO FIFO from the current tx buffer(s), one byte per FIFO
 * write (this revision handles 8-bit words only). Returns true while tx
 * data remains outstanding.
 */
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	unsigned int n, m, i;
	const uint8_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && spi_engine->tx_length) {
		m = min(n, spi_engine->tx_length);
		buf = spi_engine->tx_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		spi_engine->tx_buf += m;
		spi_engine->tx_length -= m;
		n -= m;
		/* current xfer exhausted: advance to the next one with tx data */
		if (spi_engine->tx_length == 0)
			spi_engine_tx_next(spi_engine);
	}

	return spi_engine->tx_length != 0;
}
338
/*
 * Drain the SDI FIFO into the current rx buffer(s), one byte per FIFO
 * read (this revision handles 8-bit words only). Returns true while rx
 * data is still expected.
 */
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	unsigned int n, m, i;
	uint8_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && spi_engine->rx_length) {
		m = min(n, spi_engine->rx_length);
		buf = spi_engine->rx_buf;
		for (i = 0; i < m; i++)
			buf[i] = readl_relaxed(addr);
		spi_engine->rx_buf += m;
		spi_engine->rx_length -= m;
		n -= m;
		/* current xfer filled: advance to the next one expecting rx data */
		if (spi_engine->rx_length == 0)
			spi_engine_rx_next(spi_engine);
	}

	return spi_engine->rx_length != 0;
}
360
/*
 * Interrupt handler: keeps the command and SDO FIFOs fed, drains the SDI
 * FIFO, and finalizes the in-flight message once the SYNC instruction
 * appended to its program has executed.
 */
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_master *master = devid;
	struct spi_engine *spi_engine = spi_master_get_devdata(master);
	unsigned int disable_int = 0;
	unsigned int pending;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	/* Ack SYNC and latch the ID of the last completed sync point */
	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		spi_engine->completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	/* Refill the command FIFO; stop this IRQ once the program is written */
	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	/* On SYNC, also drain RX data still below the almost-full threshold */
	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	/* Only finalize when the SYNC ID matches the current message's ID */
	if (pending & SPI_ENGINE_INT_SYNC) {
		if (spi_engine->msg &&
		    spi_engine->completed_id == spi_engine->sync_id) {
			struct spi_message *msg = spi_engine->msg;

			/* Program buffer allocated in transfer_one_message() */
			kfree(spi_engine->p);
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			spi_engine->msg = NULL;
			spi_finalize_current_message(master);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	/* Mask interrupt sources that have nothing left to do */
	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}
418
/*
 * Compile an SPI message into an SPI Engine program, queue it to the
 * hardware FIFOs and enable the interrupts needed to feed/drain them.
 * Completion is signalled by the SYNC interrupt in spi_engine_irq(),
 * which finalizes the message and frees the program buffer.
 *
 * Returns 0 on success or -ENOMEM if the program cannot be allocated.
 */
static int spi_engine_transfer_one_message(struct spi_master *master,
	struct spi_message *msg)
{
	struct spi_engine_program p_dry, *p;
	struct spi_engine *spi_engine = spi_master_get_devdata(master);
	unsigned int int_enable = 0;
	unsigned long flags;
	size_t size;

	/* Dry run only counts instructions, to size the real allocation */
	p_dry.length = 0;
	spi_engine_compile_message(spi_engine, msg, true, &p_dry);

	/* +1 leaves room for the trailing SYNC instruction added below */
	size = sizeof(*p->instructions) * (p_dry.length + 1);
	p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	spi_engine_compile_message(spi_engine, msg, false, p);

	spin_lock_irqsave(&spi_engine->lock, flags);
	/* 8-bit rolling ID identifies this message in the SYNC interrupt */
	spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
	spi_engine_program_add_cmd(p, false,
		SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));

	spi_engine->msg = msg;
	spi_engine->p = p;

	/* Prime each FIFO; enable its IRQ only if everything didn't fit */
	spi_engine->cmd_buf = p->instructions;
	spi_engine->cmd_length = p->length;
	if (spi_engine_write_cmd_fifo(spi_engine))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(spi_engine);
	if (spi_engine_write_tx_fifo(spi_engine))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(spi_engine);
	if (spi_engine->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	/* SYNC is always needed to learn about message completion */
	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	return 0;
}
467
468static int spi_engine_probe(struct platform_device *pdev)
469{
470 struct spi_engine *spi_engine;
471 struct spi_master *master;
472 unsigned int version;
473 int irq;
474 int ret;
475
476 irq = platform_get_irq(pdev, 0);
477 if (irq <= 0)
478 return -ENXIO;
479
480 spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
481 if (!spi_engine)
482 return -ENOMEM;
483
484 master = spi_alloc_master(&pdev->dev, 0);
485 if (!master)
486 return -ENOMEM;
487
488 spi_master_set_devdata(master, spi_engine);
489
490 spin_lock_init(&spi_engine->lock);
491
492 spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
493 if (IS_ERR(spi_engine->clk)) {
494 ret = PTR_ERR(spi_engine->clk);
495 goto err_put_master;
496 }
497
498 spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
499 if (IS_ERR(spi_engine->ref_clk)) {
500 ret = PTR_ERR(spi_engine->ref_clk);
501 goto err_put_master;
502 }
503
504 ret = clk_prepare_enable(spi_engine->clk);
505 if (ret)
506 goto err_put_master;
507
508 ret = clk_prepare_enable(spi_engine->ref_clk);
509 if (ret)
510 goto err_clk_disable;
511
512 spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
513 if (IS_ERR(spi_engine->base)) {
514 ret = PTR_ERR(spi_engine->base);
515 goto err_ref_clk_disable;
516 }
517
518 version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
519 if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
520 dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
521 SPI_ENGINE_VERSION_MAJOR(version),
522 SPI_ENGINE_VERSION_MINOR(version),
523 SPI_ENGINE_VERSION_PATCH(version));
524 ret = -ENODEV;
525 goto err_ref_clk_disable;
526 }
527
528 writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
529 writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
530 writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
531
532 ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
533 if (ret)
534 goto err_ref_clk_disable;
535
536 master->dev.of_node = pdev->dev.of_node;
537 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
538 master->bits_per_word_mask = SPI_BPW_MASK(8);
539 master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
540 master->transfer_one_message = spi_engine_transfer_one_message;
541 master->num_chipselect = 8;
542
543 ret = spi_register_master(master);
544 if (ret)
545 goto err_free_irq;
546
547 platform_set_drvdata(pdev, master);
548
549 return 0;
550err_free_irq:
551 free_irq(irq, master);
552err_ref_clk_disable:
553 clk_disable_unprepare(spi_engine->ref_clk);
554err_clk_disable:
555 clk_disable_unprepare(spi_engine->clk);
556err_put_master:
557 spi_master_put(master);
558 return ret;
559}
560
/*
 * Remove: tear down in reverse order of probe.  The master is
 * unregistered (stopping the message queue) before the IRQ is released;
 * only then is the engine quiesced and put back into reset.
 */
static int spi_engine_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct spi_engine *spi_engine = spi_master_get_devdata(master);
	int irq = platform_get_irq(pdev, 0);

	spi_unregister_master(master);

	free_irq(irq, master);

	spi_master_put(master);

	/* Clear pending and mask all interrupts, then hold the core in reset */
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);

	clk_disable_unprepare(spi_engine->ref_clk);
	clk_disable_unprepare(spi_engine->clk);

	return 0;
}
582
/* Device tree compatible strings handled by this driver */
static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.remove = spi_engine_remove,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");