Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * SPI-Engine SPI controller driver
  4 * Copyright 2015 Analog Devices Inc.
  5 *  Author: Lars-Peter Clausen <lars@metafoo.de>
  6 */
  7
  8#include <linux/clk.h>
  9#include <linux/completion.h>
 10#include <linux/fpga/adi-axi-common.h>
 11#include <linux/interrupt.h>
 12#include <linux/io.h>
 13#include <linux/of.h>
 14#include <linux/module.h>
 15#include <linux/overflow.h>
 16#include <linux/platform_device.h>
 17#include <linux/spi/spi.h>
 18#include <trace/events/spi.h>
 
 
 
 
 
 19
 20#define SPI_ENGINE_REG_RESET			0x40
 21
 22#define SPI_ENGINE_REG_INT_ENABLE		0x80
 23#define SPI_ENGINE_REG_INT_PENDING		0x84
 24#define SPI_ENGINE_REG_INT_SOURCE		0x88
 25
 26#define SPI_ENGINE_REG_SYNC_ID			0xc0
 27
 28#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
 29#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
 30#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8
 31
 32#define SPI_ENGINE_REG_CMD_FIFO			0xe0
 33#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
 34#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
 35#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec
 36
 37#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
 38#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
 39#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
 40#define SPI_ENGINE_INT_SYNC			BIT(3)
 41
 42#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
 43#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
 44#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)
 45#define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH		BIT(3)
 46
 47#define SPI_ENGINE_INST_TRANSFER		0x0
 48#define SPI_ENGINE_INST_ASSERT			0x1
 49#define SPI_ENGINE_INST_WRITE			0x2
 50#define SPI_ENGINE_INST_MISC			0x3
 51#define SPI_ENGINE_INST_CS_INV			0x4
 52
 53#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
 54#define SPI_ENGINE_CMD_REG_CONFIG		0x1
 55#define SPI_ENGINE_CMD_REG_XFER_BITS		0x2
 56
 57#define SPI_ENGINE_MISC_SYNC			0x0
 58#define SPI_ENGINE_MISC_SLEEP			0x1
 59
 60#define SPI_ENGINE_TRANSFER_WRITE		0x1
 61#define SPI_ENGINE_TRANSFER_READ		0x2
 62
 63/* Arbitrary sync ID for use by host->cur_msg */
 64#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID		0x1
 65
 66#define SPI_ENGINE_CMD(inst, arg1, arg2) \
 67	(((inst) << 12) | ((arg1) << 8) | (arg2))
 68
 69#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
 70	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
 71#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
 72	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
 73#define SPI_ENGINE_CMD_WRITE(reg, val) \
 74	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
 75#define SPI_ENGINE_CMD_SLEEP(delay) \
 76	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
 77#define SPI_ENGINE_CMD_SYNC(id) \
 78	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
 79#define SPI_ENGINE_CMD_CS_INV(flags) \
 80	SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
 81
/**
 * struct spi_engine_program - compiled SPI engine command stream
 * @length: number of valid entries in @instructions
 * @instructions: raw 16-bit SPI engine instructions, executed in order
 */
struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
};
 86
/**
 * struct spi_engine_message_state - SPI engine per-message state
 *
 * Tracks progress of the command, TX and RX streams while a message is
 * being fed to / drained from the hardware FIFOs.
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};
108
struct spi_engine {
	struct clk *clk;	/* AXI bus clock ("s_axi_aclk") */
	struct clk *ref_clk;	/* SPI reference clock ("spi_clk") */

	/* protects int_enable and access to the hardware FIFOs */
	spinlock_t lock;

	void __iomem *base;	/* memory-mapped register base */
	struct spi_engine_message_state msg_state;
	struct completion msg_complete;	/* signalled by the SYNC interrupt */
	unsigned int int_enable;	/* shadow of REG_INT_ENABLE */
	/* shadows hardware CS inversion flag state */
	u8 cs_inv;
};
122
123static void spi_engine_program_add_cmd(struct spi_engine_program *p,
124	bool dry, uint16_t cmd)
125{
126	p->length++;
127
128	if (!dry)
129		p->instructions[p->length - 1] = cmd;
 
130}
131
132static unsigned int spi_engine_get_config(struct spi_device *spi)
133{
134	unsigned int config = 0;
135
136	if (spi->mode & SPI_CPOL)
137		config |= SPI_ENGINE_CONFIG_CPOL;
138	if (spi->mode & SPI_CPHA)
139		config |= SPI_ENGINE_CONFIG_CPHA;
140	if (spi->mode & SPI_3WIRE)
141		config |= SPI_ENGINE_CONFIG_3WIRE;
142	if (spi->mode & SPI_MOSI_IDLE_HIGH)
143		config |= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
144	if (spi->mode & SPI_MOSI_IDLE_LOW)
145		config &= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
146
147	return config;
148}
149
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
150static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
151	struct spi_transfer *xfer)
152{
153	unsigned int len;
154
155	if (xfer->bits_per_word <= 8)
156		len = xfer->len;
157	else if (xfer->bits_per_word <= 16)
158		len = xfer->len / 2;
159	else
160		len = xfer->len / 4;
161
162	while (len) {
163		unsigned int n = min(len, 256U);
164		unsigned int flags = 0;
165
166		if (xfer->tx_buf)
167			flags |= SPI_ENGINE_TRANSFER_WRITE;
168		if (xfer->rx_buf)
169			flags |= SPI_ENGINE_TRANSFER_READ;
170
171		spi_engine_program_add_cmd(p, dry,
172			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
173		len -= n;
174	}
175}
176
177static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
178				 int delay_ns, int inst_ns, u32 sclk_hz)
179{
 
180	unsigned int t;
181
182	/*
183	 * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if
184	 * delay is less that the instruction execution time, there is no need
185	 * for an extra sleep instruction since the instruction execution time
186	 * will already cover the required delay.
187	 */
188	if (delay_ns < 0 || delay_ns <= inst_ns)
189		return;
190
191	t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
192	while (t) {
193		unsigned int n = min(t, 256U);
194
195		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
196		t -= n;
197	}
198}
199
200static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
201		struct spi_device *spi, bool assert)
202{
203	unsigned int mask = 0xff;
204
205	if (assert)
206		mask ^= BIT(spi_get_chipselect(spi, 0));
207
208	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
209}
210
211/*
212 * Performs precompile steps on the message.
213 *
214 * The SPI core does most of the message/transfer validation and filling in
215 * fields for us via __spi_validate(). This fixes up anything remaining not
216 * done there.
217 *
218 * NB: This is separate from spi_engine_compile_message() because the latter
219 * is called twice and would otherwise result in double-evaluation.
220 */
221static void spi_engine_precompile_message(struct spi_message *msg)
222{
223	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
224	struct spi_transfer *xfer;
225
226	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
227		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
228		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
229	}
230}
231
/*
 * Translate a spi_message into a SPI engine instruction program. Called
 * twice by spi_engine_optimize_message(): once with dry=true to count
 * instructions, then with dry=false to fill the allocated buffer, so the
 * two passes must emit exactly the same sequence.
 */
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
				       struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_controller *host = spi->controller;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div, inst_ns;
	bool keep_cs = false;
	u8 bits_per_word = 0;

	/*
	 * Take into account instruction execution time for more accurate sleep
	 * times, especially when the delay is small.
	 */
	inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

	/* hardware divider register defaults to 0, i.e. an actual div of 1 */
	clk_div = 1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	/* assert CS before the first transfer unless it opts out */
	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* only emit a CLK_DIV write when the divider changes */
		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			/* actual divider used is register value + 1 */
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div - 1));
		}

		/* likewise, only reprogram the word size when it changes */
		if (bits_per_word != xfer->bits_per_word && xfer->len) {
			bits_per_word = xfer->bits_per_word;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
					bits_per_word));
		}

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
				     inst_ns, xfer->effective_speed_hz);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
				/* cs_change on the last transfer leaves CS asserted */
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_engine_gen_cs(p, dry, spi, false);

				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
					&xfer->cs_change_delay, xfer), inst_ns,
					xfer->effective_speed_hz);

				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_engine_gen_cs(p, dry, spi, true);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			/* toggle CS when adjacent transfers disagree on cs_off */
			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
		}
	}

	if (!keep_cs)
		spi_engine_gen_cs(p, dry, spi, false);

	/*
	 * Restore clockdiv to default so that future gen_sleep commands don't
	 * have to be aware of the current register state.
	 */
	if (clk_div != 1)
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}
309
310static void spi_engine_xfer_next(struct spi_message *msg,
311	struct spi_transfer **_xfer)
312{
 
313	struct spi_transfer *xfer = *_xfer;
314
315	if (!xfer) {
316		xfer = list_first_entry(&msg->transfers,
317			struct spi_transfer, transfer_list);
318	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
319		xfer = NULL;
320	} else {
321		xfer = list_next_entry(xfer, transfer_list);
322	}
323
324	*_xfer = xfer;
325}
326
327static void spi_engine_tx_next(struct spi_message *msg)
328{
329	struct spi_engine_message_state *st = msg->state;
330	struct spi_transfer *xfer = st->tx_xfer;
331
332	do {
333		spi_engine_xfer_next(msg, &xfer);
334	} while (xfer && !xfer->tx_buf);
335
336	st->tx_xfer = xfer;
337	if (xfer) {
338		st->tx_length = xfer->len;
339		st->tx_buf = xfer->tx_buf;
340	} else {
341		st->tx_buf = NULL;
342	}
343}
344
345static void spi_engine_rx_next(struct spi_message *msg)
346{
347	struct spi_engine_message_state *st = msg->state;
348	struct spi_transfer *xfer = st->rx_xfer;
349
350	do {
351		spi_engine_xfer_next(msg, &xfer);
352	} while (xfer && !xfer->rx_buf);
353
354	st->rx_xfer = xfer;
355	if (xfer) {
356		st->rx_length = xfer->len;
357		st->rx_buf = xfer->rx_buf;
358	} else {
359		st->rx_buf = NULL;
360	}
361}
362
363static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
364				      struct spi_message *msg)
365{
366	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
367	struct spi_engine_message_state *st = msg->state;
368	unsigned int n, m, i;
369	const uint16_t *buf;
370
371	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
372	while (n && st->cmd_length) {
373		m = min(n, st->cmd_length);
374		buf = st->cmd_buf;
375		for (i = 0; i < m; i++)
376			writel_relaxed(buf[i], addr);
377		st->cmd_buf += m;
378		st->cmd_length -= m;
379		n -= m;
380	}
381
382	return st->cmd_length != 0;
383}
384
/*
 * Fill the SDO (TX) FIFO from the message's pending tx buffers, one FIFO
 * word per SPI word. The element width read from tx_buf depends on the
 * current transfer's bits_per_word. Returns true while TX data remains.
 */
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
				     struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	/* n = free FIFO entries; each entry holds one SPI word */
	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		if (st->tx_xfer->bits_per_word <= 8) {
			const u8 *buf = st->tx_buf;

			m = min(n, st->tx_length);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m;
			st->tx_length -= m;
		} else if (st->tx_xfer->bits_per_word <= 16) {
			/* NOTE: assumes tx_buf is 2-byte aligned and tx_length
			 * is a multiple of 2 (guaranteed by SPI core validation) */
			const u16 *buf = (const u16 *)st->tx_buf;

			m = min(n, st->tx_length / 2);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 2;
			st->tx_length -= m * 2;
		} else {
			const u32 *buf = (const u32 *)st->tx_buf;

			m = min(n, st->tx_length / 4);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 4;
			st->tx_length -= m * 4;
		}
		n -= m;
		/* current transfer fully queued; advance to the next TX xfer */
		if (st->tx_length == 0)
			spi_engine_tx_next(msg);
	}

	return st->tx_length != 0;
}
426
/*
 * Drain the SDI (RX) FIFO into the message's pending rx buffers, one FIFO
 * word per SPI word. The element width written to rx_buf depends on the
 * current transfer's bits_per_word. Returns true while RX data is expected.
 */
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
				    struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	/* n = words currently available in the RX FIFO */
	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		if (st->rx_xfer->bits_per_word <= 8) {
			u8 *buf = st->rx_buf;

			m = min(n, st->rx_length);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m;
			st->rx_length -= m;
		} else if (st->rx_xfer->bits_per_word <= 16) {
			/* NOTE: assumes rx_buf is 2-byte aligned and rx_length
			 * is a multiple of 2 (guaranteed by SPI core validation) */
			u16 *buf = (u16 *)st->rx_buf;

			m = min(n, st->rx_length / 2);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 2;
			st->rx_length -= m * 2;
		} else {
			u32 *buf = (u32 *)st->rx_buf;

			m = min(n, st->rx_length / 4);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 4;
			st->rx_length -= m * 4;
		}
		n -= m;
		/* current transfer fully drained; advance to the next RX xfer */
		if (st->rx_length == 0)
			spi_engine_rx_next(msg);
	}

	return st->rx_length != 0;
}
468
/*
 * Interrupt handler: services FIFO almost-empty/almost-full events for the
 * current message and completes the message when the SYNC instruction with
 * AXI_SPI_ENGINE_CUR_MSG_SYNC_ID has executed.
 */
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_message *msg = host->cur_msg;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;
	int completed_id = -1;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	/* ack SYNC and latch which sync ID completed before taking the lock */
	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	/* once a stream is exhausted, its interrupt source is masked below */
	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	/* also drain RX on SYNC to pick up any remaining words */
	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	if (pending & SPI_ENGINE_INT_SYNC && msg) {
		if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
			/* program finished; wake transfer_one_message() */
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			complete(&spi_engine->msg_complete);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}
523
524static int spi_engine_optimize_message(struct spi_message *msg)
 
525{
526	struct spi_engine_program p_dry, *p;
527
528	spi_engine_precompile_message(msg);
 
 
529
530	p_dry.length = 0;
531	spi_engine_compile_message(msg, true, &p_dry);
532
533	p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
 
534	if (!p)
535		return -ENOMEM;
536
537	spi_engine_compile_message(msg, false, p);
538
539	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
540						AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
541
542	msg->opt_state = p;
543
544	return 0;
545}
546
547static int spi_engine_unoptimize_message(struct spi_message *msg)
548{
549	kfree(msg->opt_state);
550
551	return 0;
552}
553
554static int spi_engine_setup(struct spi_device *device)
555{
556	struct spi_controller *host = device->controller;
557	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
558
559	if (device->mode & SPI_CS_HIGH)
560		spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
561	else
562		spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));
563
564	writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
565		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
566
567	/*
568	 * In addition to setting the flags, we have to do a CS assert command
569	 * to make the new setting actually take effect.
570	 */
571	writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
572		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
573
574	return 0;
575}
576
/*
 * spi_controller::transfer_one_message callback: kick off the precompiled
 * program for @msg, let the interrupt handler keep the FIFOs serviced and
 * wait for the final SYNC interrupt to signal completion.
 */
static int spi_engine_transfer_one_message(struct spi_controller *host,
	struct spi_message *msg)
{
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st = &spi_engine->msg_state;
	struct spi_engine_program *p = msg->opt_state;
	unsigned int int_enable = 0;
	unsigned long flags;

	/* reinitialize message state for this transfer */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	msg->state = st;

	reinit_completion(&spi_engine->msg_complete);

	if (trace_spi_transfer_start_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_start(msg, xfer);
	}

	/* lock out the irq handler while priming the FIFOs */
	spin_lock_irqsave(&spi_engine->lock, flags);

	/* enable each interrupt only if its stream didn't fit entirely */
	if (spi_engine_write_cmd_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(msg);
	if (spi_engine_write_tx_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(msg);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	/* SYNC interrupt signals end of the program */
	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	if (!wait_for_completion_timeout(&spi_engine->msg_complete,
					 msecs_to_jiffies(5000))) {
		dev_err(&host->dev,
			"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
		msg->status = -ETIMEDOUT;
	}

	if (trace_spi_transfer_stop_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_stop(msg, xfer);
	}

	spi_finalize_current_message(host);

	return msg->status;
}
639
/*
 * devm teardown action: quiesce the hardware. Order matters — clear any
 * pending interrupts, mask all sources, then hold the core in reset.
 */
static void spi_engine_release_hw(void *p)
{
	struct spi_engine *spi_engine = p;

	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}
648
/*
 * Probe: map resources, verify the IP core version, bring the engine out
 * of reset and register the SPI controller. All resources are devm-managed,
 * so there is no remove callback.
 */
static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
	int irq;
	int ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

	/* AXI bus clock, needed for register access */
	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	/* SPI reference clock, determines max SCLK rate */
	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	/* only major version 1 of the IP core is supported */
	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	/* take the core out of reset and start with all interrupts masked */
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
				       spi_engine);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
			       host);
	if (ret)
		return ret;

	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	/* minimum clock divider is 2, see SPI_ENGINE_CMD_REG_CLK_DIV usage */
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
	host->num_chipselect = 8;

	/* Some features depend on the IP core version. */
	if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) {
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
			host->mode_bits |= SPI_CS_HIGH;
			host->setup = spi_engine_setup;
		}
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 3)
			host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
	}

	if (host->max_speed_hz == 0)
		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

	return devm_spi_register_controller(&pdev->dev, host);
}
729
/* Device tree match table; only the 1.00.a compatible string is published. */
static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);
735
/* No .remove callback: all resources in probe are devm-managed. */
static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * SPI-Engine SPI controller driver
  4 * Copyright 2015 Analog Devices Inc.
  5 *  Author: Lars-Peter Clausen <lars@metafoo.de>
  6 */
  7
  8#include <linux/clk.h>
 
 
  9#include <linux/interrupt.h>
 10#include <linux/io.h>
 11#include <linux/of.h>
 12#include <linux/module.h>
 
 13#include <linux/platform_device.h>
 14#include <linux/spi/spi.h>
 15
 16#define SPI_ENGINE_VERSION_MAJOR(x)	((x >> 16) & 0xff)
 17#define SPI_ENGINE_VERSION_MINOR(x)	((x >> 8) & 0xff)
 18#define SPI_ENGINE_VERSION_PATCH(x)	(x & 0xff)
 19
 20#define SPI_ENGINE_REG_VERSION			0x00
 21
 22#define SPI_ENGINE_REG_RESET			0x40
 23
 24#define SPI_ENGINE_REG_INT_ENABLE		0x80
 25#define SPI_ENGINE_REG_INT_PENDING		0x84
 26#define SPI_ENGINE_REG_INT_SOURCE		0x88
 27
 28#define SPI_ENGINE_REG_SYNC_ID			0xc0
 29
 30#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
 31#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
 32#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8
 33
 34#define SPI_ENGINE_REG_CMD_FIFO			0xe0
 35#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
 36#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
 37#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec
 38
 39#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
 40#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
 41#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
 42#define SPI_ENGINE_INT_SYNC			BIT(3)
 43
 44#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
 45#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
 46#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)
 
 47
 48#define SPI_ENGINE_INST_TRANSFER		0x0
 49#define SPI_ENGINE_INST_ASSERT			0x1
 50#define SPI_ENGINE_INST_WRITE			0x2
 51#define SPI_ENGINE_INST_MISC			0x3
 
 52
 53#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
 54#define SPI_ENGINE_CMD_REG_CONFIG		0x1
 
 55
 56#define SPI_ENGINE_MISC_SYNC			0x0
 57#define SPI_ENGINE_MISC_SLEEP			0x1
 58
 59#define SPI_ENGINE_TRANSFER_WRITE		0x1
 60#define SPI_ENGINE_TRANSFER_READ		0x2
 61
 
 
 
 62#define SPI_ENGINE_CMD(inst, arg1, arg2) \
 63	(((inst) << 12) | ((arg1) << 8) | (arg2))
 64
 65#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
 66	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
 67#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
 68	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
 69#define SPI_ENGINE_CMD_WRITE(reg, val) \
 70	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
 71#define SPI_ENGINE_CMD_SLEEP(delay) \
 72	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
 73#define SPI_ENGINE_CMD_SYNC(id) \
 74	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
 
 
 75
/*
 * Compiled SPI engine command stream.
 * @length: number of valid entries in @instructions
 * @instructions: raw 16-bit SPI engine instructions, executed in order
 */
struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[];
};
 80
struct spi_engine {
	struct clk *clk;	/* AXI bus clock */
	struct clk *ref_clk;	/* SPI reference clock */

	spinlock_t lock;	/* protects message/FIFO state and int_enable */

	void __iomem *base;	/* memory-mapped register base */

	struct spi_message *msg;	/* message currently in flight */
	struct spi_engine_program *p;	/* compiled program for @msg */
	unsigned cmd_length;		/* instructions left to queue */
	const uint16_t *cmd_buf;	/* next instruction to queue */

	struct spi_transfer *tx_xfer;	/* current TX transfer */
	unsigned int tx_length;		/* TX bytes left in tx_buf */
	const uint8_t *tx_buf;		/* next TX byte to write */

	struct spi_transfer *rx_xfer;	/* current RX transfer */
	unsigned int rx_length;		/* RX bytes left in rx_buf */
	uint8_t *rx_buf;		/* next RX byte to fill */

	unsigned int sync_id;		/* sync ID assigned to @msg */
	unsigned int completed_id;	/* last sync ID reported by hardware */

	unsigned int int_enable;	/* shadow of REG_INT_ENABLE */
};
107
108static void spi_engine_program_add_cmd(struct spi_engine_program *p,
109	bool dry, uint16_t cmd)
110{
 
 
111	if (!dry)
112		p->instructions[p->length] = cmd;
113	p->length++;
114}
115
116static unsigned int spi_engine_get_config(struct spi_device *spi)
117{
118	unsigned int config = 0;
119
120	if (spi->mode & SPI_CPOL)
121		config |= SPI_ENGINE_CONFIG_CPOL;
122	if (spi->mode & SPI_CPHA)
123		config |= SPI_ENGINE_CONFIG_CPHA;
124	if (spi->mode & SPI_3WIRE)
125		config |= SPI_ENGINE_CONFIG_3WIRE;
 
 
 
 
126
127	return config;
128}
129
130static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
131	struct spi_device *spi, struct spi_transfer *xfer)
132{
133	unsigned int clk_div;
134
135	clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
136		xfer->speed_hz * 2);
137	if (clk_div > 255)
138		clk_div = 255;
139	else if (clk_div > 0)
140		clk_div -= 1;
141
142	return clk_div;
143}
144
145static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
146	struct spi_transfer *xfer)
147{
148	unsigned int len = xfer->len;
 
 
 
 
 
 
 
149
150	while (len) {
151		unsigned int n = min(len, 256U);
152		unsigned int flags = 0;
153
154		if (xfer->tx_buf)
155			flags |= SPI_ENGINE_TRANSFER_WRITE;
156		if (xfer->rx_buf)
157			flags |= SPI_ENGINE_TRANSFER_READ;
158
159		spi_engine_program_add_cmd(p, dry,
160			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
161		len -= n;
162	}
163}
164
/*
 * Emit SLEEP instructions implementing the transfer's delay (@delay, in
 * microseconds, from xfer->delay_usecs). Each SLEEP instruction waits up
 * to 256 cycles of the divided clock.
 */
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
	struct spi_engine *spi_engine, unsigned int clk_div, unsigned int delay)
{
	unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
	unsigned int t;

	if (delay == 0)
		return;

	/*
	 * NOTE(review): delay * spi_clk is a 32-bit multiply; with spi_clk in
	 * the 100 MHz range this overflows for delays of a few dozen µs or
	 * more — later kernels switched this computation to 64-bit math.
	 * Verify against the upstream fix before relying on long delays.
	 */
	t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
	while (t) {
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}
182
183static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
184		struct spi_device *spi, bool assert)
185{
186	unsigned int mask = 0xff;
187
188	if (assert)
189		mask ^= BIT(spi->chip_select);
190
191	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
192}
193
/*
 * Translate a spi_message into a SPI engine instruction program. Called
 * once with dry=true to count instructions and once with dry=false to
 * fill the allocated buffer, so both passes must emit the same sequence.
 * Always returns 0.
 */
static int spi_engine_compile_message(struct spi_engine *spi_engine,
	struct spi_message *msg, bool dry, struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div;
	bool cs_change = true;	/* assert CS before the first transfer */

	/* -1 forces a CLK_DIV write for the first transfer */
	clk_div = -1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* only emit a CLK_DIV write when the divider changes */
		new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div));
		}

		if (cs_change)
			spi_engine_gen_cs(p, dry, spi, true);

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_engine, clk_div,
			xfer->delay_usecs);

		/*
		 * CS is toggled between transfers when cs_change is set; the
		 * sense is inverted for the last transfer so that CS is
		 * normally deasserted at the end of the message.
		 */
		cs_change = xfer->cs_change;
		if (list_is_last(&xfer->transfer_list, &msg->transfers))
			cs_change = !cs_change;

		if (cs_change)
			spi_engine_gen_cs(p, dry, spi, false);
	}

	return 0;
}
234
235static void spi_engine_xfer_next(struct spi_engine *spi_engine,
236	struct spi_transfer **_xfer)
237{
238	struct spi_message *msg = spi_engine->msg;
239	struct spi_transfer *xfer = *_xfer;
240
241	if (!xfer) {
242		xfer = list_first_entry(&msg->transfers,
243			struct spi_transfer, transfer_list);
244	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
245		xfer = NULL;
246	} else {
247		xfer = list_next_entry(xfer, transfer_list);
248	}
249
250	*_xfer = xfer;
251}
252
253static void spi_engine_tx_next(struct spi_engine *spi_engine)
254{
255	struct spi_transfer *xfer = spi_engine->tx_xfer;
 
256
257	do {
258		spi_engine_xfer_next(spi_engine, &xfer);
259	} while (xfer && !xfer->tx_buf);
260
261	spi_engine->tx_xfer = xfer;
262	if (xfer) {
263		spi_engine->tx_length = xfer->len;
264		spi_engine->tx_buf = xfer->tx_buf;
265	} else {
266		spi_engine->tx_buf = NULL;
267	}
268}
269
270static void spi_engine_rx_next(struct spi_engine *spi_engine)
271{
272	struct spi_transfer *xfer = spi_engine->rx_xfer;
 
273
274	do {
275		spi_engine_xfer_next(spi_engine, &xfer);
276	} while (xfer && !xfer->rx_buf);
277
278	spi_engine->rx_xfer = xfer;
279	if (xfer) {
280		spi_engine->rx_length = xfer->len;
281		spi_engine->rx_buf = xfer->rx_buf;
282	} else {
283		spi_engine->rx_buf = NULL;
284	}
285}
286
287static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
 
288{
289	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
 
290	unsigned int n, m, i;
291	const uint16_t *buf;
292
293	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
294	while (n && spi_engine->cmd_length) {
295		m = min(n, spi_engine->cmd_length);
296		buf = spi_engine->cmd_buf;
297		for (i = 0; i < m; i++)
298			writel_relaxed(buf[i], addr);
299		spi_engine->cmd_buf += m;
300		spi_engine->cmd_length -= m;
301		n -= m;
302	}
303
304	return spi_engine->cmd_length != 0;
305}
306
307static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
 
308{
309	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
 
310	unsigned int n, m, i;
311	const uint8_t *buf;
312
313	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
314	while (n && spi_engine->tx_length) {
315		m = min(n, spi_engine->tx_length);
316		buf = spi_engine->tx_buf;
317		for (i = 0; i < m; i++)
318			writel_relaxed(buf[i], addr);
319		spi_engine->tx_buf += m;
320		spi_engine->tx_length -= m;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
321		n -= m;
322		if (spi_engine->tx_length == 0)
323			spi_engine_tx_next(spi_engine);
324	}
325
326	return spi_engine->tx_length != 0;
327}
328
329static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
 
330{
331	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
 
332	unsigned int n, m, i;
333	uint8_t *buf;
334
335	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
336	while (n && spi_engine->rx_length) {
337		m = min(n, spi_engine->rx_length);
338		buf = spi_engine->rx_buf;
339		for (i = 0; i < m; i++)
340			buf[i] = readl_relaxed(addr);
341		spi_engine->rx_buf += m;
342		spi_engine->rx_length -= m;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
343		n -= m;
344		if (spi_engine->rx_length == 0)
345			spi_engine_rx_next(spi_engine);
346	}
347
348	return spi_engine->rx_length != 0;
349}
350
/*
 * spi_engine_irq() - top-half interrupt handler
 *
 * Services the four engine interrupt sources: refills the command and TX
 * FIFOs, drains the RX FIFO, and completes the current message when the
 * SYNC marker at the end of its program has executed. Sources that no
 * longer have work pending are masked before returning.
 */
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_master *master = devid;
	struct spi_engine *spi_engine = spi_master_get_devdata(master);
	unsigned int disable_int = 0;
	unsigned int pending;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	/*
	 * Ack SYNC first and latch the ID of the completed program before
	 * touching any FIFOs, so a new SYNC arriving meanwhile is not lost.
	 */
	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		spi_engine->completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	/* On SYNC, also drain any residue below the almost-full threshold. */
	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	if (pending & SPI_ENGINE_INT_SYNC) {
		/*
		 * Only finalize if the latched ID matches the message we
		 * queued; a stale SYNC from a previous program is ignored.
		 */
		if (spi_engine->msg &&
		    spi_engine->completed_id == spi_engine->sync_id) {
			struct spi_message *msg = spi_engine->msg;

			kfree(spi_engine->p);
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			spi_engine->msg = NULL;
			spi_finalize_current_message(master);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}
408
409static int spi_engine_transfer_one_message(struct spi_master *master,
410	struct spi_message *msg)
411{
412	struct spi_engine_program p_dry, *p;
413	struct spi_engine *spi_engine = spi_master_get_devdata(master);
414	unsigned int int_enable = 0;
415	unsigned long flags;
416	size_t size;
417
418	p_dry.length = 0;
419	spi_engine_compile_message(spi_engine, msg, true, &p_dry);
420
421	size = sizeof(*p->instructions) * (p_dry.length + 1);
422	p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
423	if (!p)
424		return -ENOMEM;
425	spi_engine_compile_message(spi_engine, msg, false, p);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
426
427	spin_lock_irqsave(&spi_engine->lock, flags);
428	spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
429	spi_engine_program_add_cmd(p, false,
430		SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
431
432	spi_engine->msg = msg;
433	spi_engine->p = p;
434
435	spi_engine->cmd_buf = p->instructions;
436	spi_engine->cmd_length = p->length;
437	if (spi_engine_write_cmd_fifo(spi_engine))
438		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
439
440	spi_engine_tx_next(spi_engine);
441	if (spi_engine_write_tx_fifo(spi_engine))
442		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
443
444	spi_engine_rx_next(spi_engine);
445	if (spi_engine->rx_length != 0)
446		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
447
448	int_enable |= SPI_ENGINE_INT_SYNC;
449
450	writel_relaxed(int_enable,
451		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
452	spi_engine->int_enable = int_enable;
453	spin_unlock_irqrestore(&spi_engine->lock, flags);
454
455	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
456}
457
458static int spi_engine_probe(struct platform_device *pdev)
459{
460	struct spi_engine *spi_engine;
461	struct spi_master *master;
462	unsigned int version;
463	int irq;
464	int ret;
465
466	irq = platform_get_irq(pdev, 0);
467	if (irq <= 0)
468		return -ENXIO;
469
470	spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
471	if (!spi_engine)
472		return -ENOMEM;
473
474	master = spi_alloc_master(&pdev->dev, 0);
475	if (!master)
476		return -ENOMEM;
477
478	spi_master_set_devdata(master, spi_engine);
479
480	spin_lock_init(&spi_engine->lock);
 
 
 
 
 
 
 
 
 
481
482	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
483	if (IS_ERR(spi_engine->base)) {
484		ret = PTR_ERR(spi_engine->base);
485		goto err_put_master;
486	}
487
488	version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
489	if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
490		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
491			SPI_ENGINE_VERSION_MAJOR(version),
492			SPI_ENGINE_VERSION_MINOR(version),
493			SPI_ENGINE_VERSION_PATCH(version));
494		ret = -ENODEV;
495		goto err_put_master;
496	}
497
498	spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
499	if (IS_ERR(spi_engine->clk)) {
500		ret = PTR_ERR(spi_engine->clk);
501		goto err_put_master;
502	}
503
504	spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
505	if (IS_ERR(spi_engine->ref_clk)) {
506		ret = PTR_ERR(spi_engine->ref_clk);
507		goto err_put_master;
 
 
 
508	}
509
510	ret = clk_prepare_enable(spi_engine->clk);
511	if (ret)
512		goto err_put_master;
513
514	ret = clk_prepare_enable(spi_engine->ref_clk);
515	if (ret)
516		goto err_clk_disable;
517
518	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
519	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
520	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
521
522	ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
 
523	if (ret)
524		goto err_ref_clk_disable;
525
526	master->dev.of_node = pdev->dev.of_node;
527	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
528	master->bits_per_word_mask = SPI_BPW_MASK(8);
529	master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
530	master->transfer_one_message = spi_engine_transfer_one_message;
531	master->num_chipselect = 8;
532
533	ret = spi_register_master(master);
 
534	if (ret)
535		goto err_free_irq;
536
537	platform_set_drvdata(pdev, master);
538
539	return 0;
540err_free_irq:
541	free_irq(irq, master);
542err_ref_clk_disable:
543	clk_disable_unprepare(spi_engine->ref_clk);
544err_clk_disable:
545	clk_disable_unprepare(spi_engine->clk);
546err_put_master:
547	spi_master_put(master);
548	return ret;
549}
550
/*
 * spi_engine_remove() - unbind the driver and quiesce the hardware
 *
 * spi_master_get() takes an extra reference so the master (and the
 * devdata holding the register mapping) stays valid across
 * spi_unregister_master(), which drops the registration reference.
 */
static int spi_engine_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct spi_engine *spi_engine = spi_master_get_devdata(master);
	int irq = platform_get_irq(pdev, 0);

	/* Stop new transfers before tearing down the interrupt handler. */
	spi_unregister_master(master);

	free_irq(irq, master);

	spi_master_put(master);

	/* Ack and mask all interrupts, then hold the engine in reset. */
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);

	clk_disable_unprepare(spi_engine->ref_clk);
	clk_disable_unprepare(spi_engine->clk);

	return 0;
}
572
/* Device-tree compatible strings handled by this driver. */
static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.remove = spi_engine_remove,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");