v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * SPI-Engine SPI controller driver
  4 * Copyright 2015 Analog Devices Inc.
  5 *  Author: Lars-Peter Clausen <lars@metafoo.de>
  6 */
  7
  8#include <linux/clk.h>
  9#include <linux/interrupt.h>
 10#include <linux/io.h>
 11#include <linux/of.h>
 12#include <linux/module.h>
 13#include <linux/platform_device.h>
 14#include <linux/spi/spi.h>
 15
 16#define SPI_ENGINE_VERSION_MAJOR(x)	((x >> 16) & 0xff)
 17#define SPI_ENGINE_VERSION_MINOR(x)	((x >> 8) & 0xff)
 18#define SPI_ENGINE_VERSION_PATCH(x)	(x & 0xff)
 19
 20#define SPI_ENGINE_REG_VERSION			0x00
 21
 22#define SPI_ENGINE_REG_RESET			0x40
 23
 24#define SPI_ENGINE_REG_INT_ENABLE		0x80
 25#define SPI_ENGINE_REG_INT_PENDING		0x84
 26#define SPI_ENGINE_REG_INT_SOURCE		0x88
 27
 28#define SPI_ENGINE_REG_SYNC_ID			0xc0
 29
 30#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
 31#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
 32#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8
 33
 34#define SPI_ENGINE_REG_CMD_FIFO			0xe0
 35#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
 36#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
 37#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec
 38
 39#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
 40#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
 41#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
 42#define SPI_ENGINE_INT_SYNC			BIT(3)
 43
 44#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
 45#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
 46#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)
 47
 48#define SPI_ENGINE_INST_TRANSFER		0x0
 49#define SPI_ENGINE_INST_ASSERT			0x1
 50#define SPI_ENGINE_INST_WRITE			0x2
 51#define SPI_ENGINE_INST_MISC			0x3
 52
 53#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
 54#define SPI_ENGINE_CMD_REG_CONFIG		0x1
 55
 56#define SPI_ENGINE_MISC_SYNC			0x0
 57#define SPI_ENGINE_MISC_SLEEP			0x1
 58
 59#define SPI_ENGINE_TRANSFER_WRITE		0x1
 60#define SPI_ENGINE_TRANSFER_READ		0x2
 61
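/*
 * Each SPI Engine instruction is a 16-bit word: bits [15:12] select the
 * instruction type, bits [11:8] carry the first argument and bits [7:0]
 * the second, as encoded by SPI_ENGINE_CMD() below.
 */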
 62#define SPI_ENGINE_CMD(inst, arg1, arg2) \
 63	(((inst) << 12) | ((arg1) << 8) | (arg2))
 64
 65#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
 66	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
 67#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
 68	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
 69#define SPI_ENGINE_CMD_WRITE(reg, val) \
 70	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
 71#define SPI_ENGINE_CMD_SLEEP(delay) \
 72	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
 73#define SPI_ENGINE_CMD_SYNC(id) \
 74	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
 75
 76struct spi_engine_program {
 77	unsigned int length;
 78	uint16_t instructions[];
 79};
 80
 81struct spi_engine {
 82	struct clk *clk;
 83	struct clk *ref_clk;
 84
 85	spinlock_t lock;
 86
 87	void __iomem *base;
 88
 89	struct spi_message *msg;
 90	struct spi_engine_program *p;
 91	unsigned cmd_length;
 92	const uint16_t *cmd_buf;
 93
 94	struct spi_transfer *tx_xfer;
 95	unsigned int tx_length;
 96	const uint8_t *tx_buf;
 97
 98	struct spi_transfer *rx_xfer;
 99	unsigned int rx_length;
100	uint8_t *rx_buf;
101
102	unsigned int sync_id;
103	unsigned int completed_id;
104
105	unsigned int int_enable;
106};
107
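/*
 * Programs are built in two passes: a "dry" pass that only counts the
 * instructions so the caller can size the allocation, and a second pass
 * that actually stores them.
 */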
108static void spi_engine_program_add_cmd(struct spi_engine_program *p,
109	bool dry, uint16_t cmd)
110{
111	if (!dry)
112		p->instructions[p->length] = cmd;
113	p->length++;
114}
115
116static unsigned int spi_engine_get_config(struct spi_device *spi)
117{
118	unsigned int config = 0;
119
120	if (spi->mode & SPI_CPOL)
121		config |= SPI_ENGINE_CONFIG_CPOL;
122	if (spi->mode & SPI_CPHA)
123		config |= SPI_ENGINE_CONFIG_CPHA;
124	if (spi->mode & SPI_3WIRE)
125		config |= SPI_ENGINE_CONFIG_3WIRE;
126
127	return config;
128}
129
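/*
 * The SPI clock is ref_clk / ((divider + 1) * 2); the register stores the
 * divider value, capped at 255.
 */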
130static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
131	struct spi_device *spi, struct spi_transfer *xfer)
132{
133	unsigned int clk_div;
134
135	clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
136		xfer->speed_hz * 2);
137	if (clk_div > 255)
138		clk_div = 255;
139	else if (clk_div > 0)
140		clk_div -= 1;
141
142	return clk_div;
143}
144
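/*
 * A single TRANSFER instruction covers at most 256 words (the length is
 * encoded as n - 1 in 8 bits), so longer transfers are split into several
 * instructions.
 */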
145static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
146	struct spi_transfer *xfer)
147{
148	unsigned int len = xfer->len;
149
150	while (len) {
151		unsigned int n = min(len, 256U);
152		unsigned int flags = 0;
153
154		if (xfer->tx_buf)
155			flags |= SPI_ENGINE_TRANSFER_WRITE;
156		if (xfer->rx_buf)
157			flags |= SPI_ENGINE_TRANSFER_READ;
158
159		spi_engine_program_add_cmd(p, dry,
160			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
161		len -= n;
162	}
163}
164
165static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
166	struct spi_engine *spi_engine, unsigned int clk_div,
167	struct spi_transfer *xfer)
168{
169	unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
170	unsigned int t;
171	int delay;
172
173	if (xfer->delay_usecs) {
174		delay = xfer->delay_usecs;
175	} else {
176		delay = spi_delay_to_ns(&xfer->delay, xfer);
177		if (delay < 0)
178			return;
179		delay /= 1000;
180	}
181
182	if (delay == 0)
183		return;
184
185	t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
186	while (t) {
187		unsigned int n = min(t, 256U);
188
189		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
190		t -= n;
191	}
192}
193
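/*
 * The ASSERT instruction takes a chip select mask with one bit per line;
 * clearing a bit asserts that chip select, so 0xff leaves all of them
 * deasserted.
 */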
194static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
195		struct spi_device *spi, bool assert)
196{
197	unsigned int mask = 0xff;
198
199	if (assert)
200		mask ^= BIT(spi->chip_select);
201
202	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
203}
204
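/*
 * Translates a spi_message into SPI Engine instructions: a CONFIG write,
 * CLK_DIV writes whenever the transfer speed changes, chip select
 * asserts/deasserts, and TRANSFER plus optional SLEEP instructions for
 * each spi_transfer.
 */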
205static int spi_engine_compile_message(struct spi_engine *spi_engine,
206	struct spi_message *msg, bool dry, struct spi_engine_program *p)
207{
208	struct spi_device *spi = msg->spi;
209	struct spi_transfer *xfer;
210	int clk_div, new_clk_div;
211	bool cs_change = true;
212
213	clk_div = -1;
214
215	spi_engine_program_add_cmd(p, dry,
216		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
217			spi_engine_get_config(spi)));
218
219	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
220		new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
221		if (new_clk_div != clk_div) {
222			clk_div = new_clk_div;
223			spi_engine_program_add_cmd(p, dry,
224				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
225					clk_div));
226		}
227
228		if (cs_change)
229			spi_engine_gen_cs(p, dry, spi, true);
230
231		spi_engine_gen_xfer(p, dry, xfer);
232		spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer);
233
234		cs_change = xfer->cs_change;
235		if (list_is_last(&xfer->transfer_list, &msg->transfers))
236			cs_change = !cs_change;
237
238		if (cs_change)
239			spi_engine_gen_cs(p, dry, spi, false);
240	}
241
242	return 0;
243}
244
245static void spi_engine_xfer_next(struct spi_engine *spi_engine,
246	struct spi_transfer **_xfer)
247{
248	struct spi_message *msg = spi_engine->msg;
249	struct spi_transfer *xfer = *_xfer;
250
251	if (!xfer) {
252		xfer = list_first_entry(&msg->transfers,
253			struct spi_transfer, transfer_list);
254	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
255		xfer = NULL;
256	} else {
257		xfer = list_next_entry(xfer, transfer_list);
258	}
259
260	*_xfer = xfer;
261}
262
263static void spi_engine_tx_next(struct spi_engine *spi_engine)
264{
265	struct spi_transfer *xfer = spi_engine->tx_xfer;
266
267	do {
268		spi_engine_xfer_next(spi_engine, &xfer);
269	} while (xfer && !xfer->tx_buf);
270
271	spi_engine->tx_xfer = xfer;
272	if (xfer) {
273		spi_engine->tx_length = xfer->len;
274		spi_engine->tx_buf = xfer->tx_buf;
275	} else {
276		spi_engine->tx_buf = NULL;
277	}
278}
279
280static void spi_engine_rx_next(struct spi_engine *spi_engine)
281{
282	struct spi_transfer *xfer = spi_engine->rx_xfer;
283
284	do {
285		spi_engine_xfer_next(spi_engine, &xfer);
286	} while (xfer && !xfer->rx_buf);
287
288	spi_engine->rx_xfer = xfer;
289	if (xfer) {
290		spi_engine->rx_length = xfer->len;
291		spi_engine->rx_buf = xfer->rx_buf;
292	} else {
293		spi_engine->rx_buf = NULL;
294	}
295}
296
297static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
298{
299	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
300	unsigned int n, m, i;
301	const uint16_t *buf;
302
303	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
304	while (n && spi_engine->cmd_length) {
305		m = min(n, spi_engine->cmd_length);
306		buf = spi_engine->cmd_buf;
307		for (i = 0; i < m; i++)
308			writel_relaxed(buf[i], addr);
309		spi_engine->cmd_buf += m;
310		spi_engine->cmd_length -= m;
311		n -= m;
312	}
313
314	return spi_engine->cmd_length != 0;
315}
316
317static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
318{
319	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
320	unsigned int n, m, i;
321	const uint8_t *buf;
322
323	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
324	while (n && spi_engine->tx_length) {
325		m = min(n, spi_engine->tx_length);
326		buf = spi_engine->tx_buf;
327		for (i = 0; i < m; i++)
328			writel_relaxed(buf[i], addr);
329		spi_engine->tx_buf += m;
330		spi_engine->tx_length -= m;
331		n -= m;
332		if (spi_engine->tx_length == 0)
333			spi_engine_tx_next(spi_engine);
334	}
335
336	return spi_engine->tx_length != 0;
337}
338
339static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
340{
341	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
342	unsigned int n, m, i;
343	uint8_t *buf;
344
345	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
346	while (n && spi_engine->rx_length) {
347		m = min(n, spi_engine->rx_length);
348		buf = spi_engine->rx_buf;
349		for (i = 0; i < m; i++)
350			buf[i] = readl_relaxed(addr);
351		spi_engine->rx_buf += m;
352		spi_engine->rx_length -= m;
353		n -= m;
354		if (spi_engine->rx_length == 0)
355			spi_engine_rx_next(spi_engine);
356	}
357
358	return spi_engine->rx_length != 0;
359}
360
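/*
 * Interrupt handler: tops up the command and SDO FIFOs, drains the SDI
 * FIFO, and finalizes the current message once a SYNC interrupt reports
 * the message's sync_id. Interrupt sources with no remaining work are
 * masked again.
 */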
361static irqreturn_t spi_engine_irq(int irq, void *devid)
362{
363	struct spi_master *master = devid;
364	struct spi_engine *spi_engine = spi_master_get_devdata(master);
365	unsigned int disable_int = 0;
366	unsigned int pending;
367
368	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
369
370	if (pending & SPI_ENGINE_INT_SYNC) {
371		writel_relaxed(SPI_ENGINE_INT_SYNC,
372			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
373		spi_engine->completed_id = readl_relaxed(
374			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
375	}
376
377	spin_lock(&spi_engine->lock);
378
379	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
380		if (!spi_engine_write_cmd_fifo(spi_engine))
381			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
382	}
383
384	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
385		if (!spi_engine_write_tx_fifo(spi_engine))
386			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
387	}
388
389	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
390		if (!spi_engine_read_rx_fifo(spi_engine))
391			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
392	}
393
394	if (pending & SPI_ENGINE_INT_SYNC) {
395		if (spi_engine->msg &&
396		    spi_engine->completed_id == spi_engine->sync_id) {
397			struct spi_message *msg = spi_engine->msg;
398
399			kfree(spi_engine->p);
400			msg->status = 0;
401			msg->actual_length = msg->frame_length;
402			spi_engine->msg = NULL;
403			spi_finalize_current_message(master);
404			disable_int |= SPI_ENGINE_INT_SYNC;
405		}
406	}
407
408	if (disable_int) {
409		spi_engine->int_enable &= ~disable_int;
410		writel_relaxed(spi_engine->int_enable,
411			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
412	}
413
414	spin_unlock(&spi_engine->lock);
415
416	return IRQ_HANDLED;
417}
418
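/*
 * Compiles the message twice (a dry run to size the buffer, then for
 * real), appends a SYNC instruction carrying an incrementing id, primes
 * the FIFOs and enables the interrupts needed to keep them serviced.
 */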
419static int spi_engine_transfer_one_message(struct spi_master *master,
420	struct spi_message *msg)
421{
422	struct spi_engine_program p_dry, *p;
423	struct spi_engine *spi_engine = spi_master_get_devdata(master);
424	unsigned int int_enable = 0;
425	unsigned long flags;
426	size_t size;
427
428	p_dry.length = 0;
429	spi_engine_compile_message(spi_engine, msg, true, &p_dry);
430
431	size = sizeof(*p->instructions) * (p_dry.length + 1);
432	p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
433	if (!p)
434		return -ENOMEM;
435	spi_engine_compile_message(spi_engine, msg, false, p);
436
437	spin_lock_irqsave(&spi_engine->lock, flags);
438	spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
439	spi_engine_program_add_cmd(p, false,
440		SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
441
442	spi_engine->msg = msg;
443	spi_engine->p = p;
444
445	spi_engine->cmd_buf = p->instructions;
446	spi_engine->cmd_length = p->length;
447	if (spi_engine_write_cmd_fifo(spi_engine))
448		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
449
450	spi_engine_tx_next(spi_engine);
451	if (spi_engine_write_tx_fifo(spi_engine))
452		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
453
454	spi_engine_rx_next(spi_engine);
455	if (spi_engine->rx_length != 0)
456		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
457
458	int_enable |= SPI_ENGINE_INT_SYNC;
459
460	writel_relaxed(int_enable,
461		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
462	spi_engine->int_enable = int_enable;
463	spin_unlock_irqrestore(&spi_engine->lock, flags);
464
465	return 0;
466}
467
468static int spi_engine_probe(struct platform_device *pdev)
469{
470	struct spi_engine *spi_engine;
471	struct spi_master *master;
472	unsigned int version;
473	int irq;
474	int ret;
475
476	irq = platform_get_irq(pdev, 0);
477	if (irq <= 0)
478		return -ENXIO;
479
480	spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
481	if (!spi_engine)
482		return -ENOMEM;
483
484	master = spi_alloc_master(&pdev->dev, 0);
485	if (!master)
486		return -ENOMEM;
487
488	spi_master_set_devdata(master, spi_engine);
489
490	spin_lock_init(&spi_engine->lock);
491
492	spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
493	if (IS_ERR(spi_engine->clk)) {
494		ret = PTR_ERR(spi_engine->clk);
495		goto err_put_master;
496	}
497
498	spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
499	if (IS_ERR(spi_engine->ref_clk)) {
500		ret = PTR_ERR(spi_engine->ref_clk);
501		goto err_put_master;
502	}
503
504	ret = clk_prepare_enable(spi_engine->clk);
505	if (ret)
506		goto err_put_master;
507
508	ret = clk_prepare_enable(spi_engine->ref_clk);
509	if (ret)
510		goto err_clk_disable;
511
512	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
513	if (IS_ERR(spi_engine->base)) {
514		ret = PTR_ERR(spi_engine->base);
515		goto err_ref_clk_disable;
516	}
517
518	version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
519	if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
520		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
521			SPI_ENGINE_VERSION_MAJOR(version),
522			SPI_ENGINE_VERSION_MINOR(version),
523			SPI_ENGINE_VERSION_PATCH(version));
524		ret = -ENODEV;
525		goto err_ref_clk_disable;
526	}
527
528	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
529	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
530	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
531
532	ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
533	if (ret)
534		goto err_ref_clk_disable;
535
536	master->dev.of_node = pdev->dev.of_node;
537	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
538	master->bits_per_word_mask = SPI_BPW_MASK(8);
539	master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
540	master->transfer_one_message = spi_engine_transfer_one_message;
541	master->num_chipselect = 8;
542
543	ret = spi_register_master(master);
544	if (ret)
545		goto err_free_irq;
546
547	platform_set_drvdata(pdev, master);
548
549	return 0;
550err_free_irq:
551	free_irq(irq, master);
552err_ref_clk_disable:
553	clk_disable_unprepare(spi_engine->ref_clk);
554err_clk_disable:
555	clk_disable_unprepare(spi_engine->clk);
556err_put_master:
557	spi_master_put(master);
558	return ret;
559}
560
561static int spi_engine_remove(struct platform_device *pdev)
562{
563	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
564	struct spi_engine *spi_engine = spi_master_get_devdata(master);
565	int irq = platform_get_irq(pdev, 0);
566
567	spi_unregister_master(master);
568
569	free_irq(irq, master);
570
571	spi_master_put(master);
572
573	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
574	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
575	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
576
577	clk_disable_unprepare(spi_engine->ref_clk);
578	clk_disable_unprepare(spi_engine->clk);
579
580	return 0;
581}
582
583static const struct of_device_id spi_engine_match_table[] = {
584	{ .compatible = "adi,axi-spi-engine-1.00.a" },
585	{ },
586};
587MODULE_DEVICE_TABLE(of, spi_engine_match_table);
588
589static struct platform_driver spi_engine_driver = {
590	.probe = spi_engine_probe,
591	.remove = spi_engine_remove,
592	.driver = {
593		.name = "spi-engine",
594		.of_match_table = spi_engine_match_table,
595	},
596};
597module_platform_driver(spi_engine_driver);
598
599MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
600MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
601MODULE_LICENSE("GPL");
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * SPI-Engine SPI controller driver
  4 * Copyright 2015 Analog Devices Inc.
  5 *  Author: Lars-Peter Clausen <lars@metafoo.de>
  6 */
  7
  8#include <linux/clk.h>
  9#include <linux/idr.h>
 10#include <linux/interrupt.h>
 11#include <linux/io.h>
 12#include <linux/of.h>
 13#include <linux/module.h>
 14#include <linux/platform_device.h>
 15#include <linux/spi/spi.h>
 16#include <linux/timer.h>
 17
 18#define SPI_ENGINE_VERSION_MAJOR(x)	((x >> 16) & 0xff)
 19#define SPI_ENGINE_VERSION_MINOR(x)	((x >> 8) & 0xff)
 20#define SPI_ENGINE_VERSION_PATCH(x)	(x & 0xff)
 21
 22#define SPI_ENGINE_REG_VERSION			0x00
 23
 24#define SPI_ENGINE_REG_RESET			0x40
 25
 26#define SPI_ENGINE_REG_INT_ENABLE		0x80
 27#define SPI_ENGINE_REG_INT_PENDING		0x84
 28#define SPI_ENGINE_REG_INT_SOURCE		0x88
 29
 30#define SPI_ENGINE_REG_SYNC_ID			0xc0
 31
 32#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
 33#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
 34#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8
 35
 36#define SPI_ENGINE_REG_CMD_FIFO			0xe0
 37#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
 38#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
 39#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec
 40
 41#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
 42#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
 43#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
 44#define SPI_ENGINE_INT_SYNC			BIT(3)
 45
 46#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
 47#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
 48#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)
 49
 50#define SPI_ENGINE_INST_TRANSFER		0x0
 51#define SPI_ENGINE_INST_ASSERT			0x1
 52#define SPI_ENGINE_INST_WRITE			0x2
 53#define SPI_ENGINE_INST_MISC			0x3
 54
 55#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
 56#define SPI_ENGINE_CMD_REG_CONFIG		0x1
 57#define SPI_ENGINE_CMD_REG_XFER_BITS		0x2
 58
 59#define SPI_ENGINE_MISC_SYNC			0x0
 60#define SPI_ENGINE_MISC_SLEEP			0x1
 61
 62#define SPI_ENGINE_TRANSFER_WRITE		0x1
 63#define SPI_ENGINE_TRANSFER_READ		0x2
 64
 65#define SPI_ENGINE_CMD(inst, arg1, arg2) \
 66	(((inst) << 12) | ((arg1) << 8) | (arg2))
 67
 68#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
 69	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
 70#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
 71	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
 72#define SPI_ENGINE_CMD_WRITE(reg, val) \
 73	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
 74#define SPI_ENGINE_CMD_SLEEP(delay) \
 75	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
 76#define SPI_ENGINE_CMD_SYNC(id) \
 77	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
 78
 79struct spi_engine_program {
 80	unsigned int length;
 81	uint16_t instructions[];
 82};
 83
 84/**
 85 * struct spi_engine_message_state - SPI engine per-message state
 86 */
 87struct spi_engine_message_state {
 88	/** @p: Instructions for executing this message. */
 89	struct spi_engine_program *p;
 90	/** @cmd_length: Number of elements in cmd_buf array. */
 91	unsigned cmd_length;
 92	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
 93	const uint16_t *cmd_buf;
 94	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
 95	struct spi_transfer *tx_xfer;
 96	/** @tx_length: Size of tx_buf in bytes. */
 97	unsigned int tx_length;
 98	/** @tx_buf: Bytes not yet written to TX FIFO. */
 99	const uint8_t *tx_buf;
100	/** @rx_xfer: Next xfer with rx_buf not yet fully read from the RX FIFO. */
101	struct spi_transfer *rx_xfer;
102	/** @rx_length: Size of rx_buf in bytes. */
103	unsigned int rx_length;
104	/** @rx_buf: Bytes not yet read from the RX FIFO. */
105	uint8_t *rx_buf;
106	/** @sync_id: ID to correlate SYNC interrupts with this message. */
107	u8 sync_id;
108};
109
110struct spi_engine {
111	struct clk *clk;
112	struct clk *ref_clk;
113
114	spinlock_t lock;
115
116	void __iomem *base;
117	struct ida sync_ida;
118	struct timer_list watchdog_timer;
119	struct spi_controller *controller;
120
121	unsigned int int_enable;
122};
123
124static void spi_engine_program_add_cmd(struct spi_engine_program *p,
125	bool dry, uint16_t cmd)
126{
127	if (!dry)
128		p->instructions[p->length] = cmd;
129	p->length++;
130}
131
132static unsigned int spi_engine_get_config(struct spi_device *spi)
133{
134	unsigned int config = 0;
135
136	if (spi->mode & SPI_CPOL)
137		config |= SPI_ENGINE_CONFIG_CPOL;
138	if (spi->mode & SPI_CPHA)
139		config |= SPI_ENGINE_CONFIG_CPHA;
140	if (spi->mode & SPI_3WIRE)
141		config |= SPI_ENGINE_CONFIG_3WIRE;
142
143	return config;
144}
145
146static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
147	struct spi_transfer *xfer)
148{
149	unsigned int len;
150
151	if (xfer->bits_per_word <= 8)
152		len = xfer->len;
153	else if (xfer->bits_per_word <= 16)
154		len = xfer->len / 2;
155	else
156		len = xfer->len / 4;
157
158	while (len) {
159		unsigned int n = min(len, 256U);
160		unsigned int flags = 0;
161
162		if (xfer->tx_buf)
163			flags |= SPI_ENGINE_TRANSFER_WRITE;
164		if (xfer->rx_buf)
165			flags |= SPI_ENGINE_TRANSFER_READ;
166
167		spi_engine_program_add_cmd(p, dry,
168			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
169		len -= n;
170	}
171}
172
173static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
174				 int delay_ns, u32 sclk_hz)
175{
176	unsigned int t;
177
178	/* negative delay indicates error, e.g. from spi_delay_to_ns() */
179	if (delay_ns <= 0)
180		return;
181
182	/* rounding down since executing the instruction adds a couple of ticks delay */
183	t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC);
184	while (t) {
185		unsigned int n = min(t, 256U);
186
187		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
188		t -= n;
189	}
190}
191
192static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
193		struct spi_device *spi, bool assert)
194{
195	unsigned int mask = 0xff;
196
197	if (assert)
198		mask ^= BIT(spi_get_chipselect(spi, 0));
199
200	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
201}
202
203/*
204 * Performs precompile steps on the message.
205 *
206 * The SPI core does most of the message/transfer validation and filling in
207 * fields for us via __spi_validate(). This fixes up anything remaining not
208 * done there.
209 *
210 * NB: This is separate from spi_engine_compile_message() because the latter
211 * is called twice and would otherwise result in double-evaluation.
212 */
213static void spi_engine_precompile_message(struct spi_message *msg)
214{
215	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
216	struct spi_transfer *xfer;
217
218	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
219		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
220		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
221	}
222}
223
224static void spi_engine_compile_message(struct spi_message *msg, bool dry,
225				       struct spi_engine_program *p)
226{
227	struct spi_device *spi = msg->spi;
228	struct spi_controller *host = spi->controller;
229	struct spi_transfer *xfer;
230	int clk_div, new_clk_div;
231	bool keep_cs = false;
232	u8 bits_per_word = 0;
233
234	clk_div = 1;
235
236	spi_engine_program_add_cmd(p, dry,
237		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
238			spi_engine_get_config(spi)));
239
240	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
241	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);
242
243	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
244		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
245		if (new_clk_div != clk_div) {
246			clk_div = new_clk_div;
247			/* actual divider used is register value + 1 */
248			spi_engine_program_add_cmd(p, dry,
249				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
250					clk_div - 1));
251		}
252
253		if (bits_per_word != xfer->bits_per_word) {
254			bits_per_word = xfer->bits_per_word;
255			spi_engine_program_add_cmd(p, dry,
256				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
257					bits_per_word));
258		}
259
260		spi_engine_gen_xfer(p, dry, xfer);
261		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
262				     xfer->effective_speed_hz);
263
264		if (xfer->cs_change) {
265			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
266				keep_cs = true;
267			} else {
268				if (!xfer->cs_off)
269					spi_engine_gen_cs(p, dry, spi, false);
270
271				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
272					&xfer->cs_change_delay, xfer),
273					xfer->effective_speed_hz);
274
275				if (!list_next_entry(xfer, transfer_list)->cs_off)
276					spi_engine_gen_cs(p, dry, spi, true);
277			}
278		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
279			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
280			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
281		}
282	}
283
284	if (!keep_cs)
285		spi_engine_gen_cs(p, dry, spi, false);
286
287	/*
288	 * Restore clockdiv to default so that future gen_sleep commands don't
289	 * have to be aware of the current register state.
290	 */
291	if (clk_div != 1)
292		spi_engine_program_add_cmd(p, dry,
293			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
294}
295
296static void spi_engine_xfer_next(struct spi_message *msg,
297	struct spi_transfer **_xfer)
298{
299	struct spi_transfer *xfer = *_xfer;
300
301	if (!xfer) {
302		xfer = list_first_entry(&msg->transfers,
303			struct spi_transfer, transfer_list);
304	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
305		xfer = NULL;
306	} else {
307		xfer = list_next_entry(xfer, transfer_list);
308	}
309
310	*_xfer = xfer;
311}
312
313static void spi_engine_tx_next(struct spi_message *msg)
314{
315	struct spi_engine_message_state *st = msg->state;
316	struct spi_transfer *xfer = st->tx_xfer;
317
318	do {
319		spi_engine_xfer_next(msg, &xfer);
320	} while (xfer && !xfer->tx_buf);
321
322	st->tx_xfer = xfer;
323	if (xfer) {
324		st->tx_length = xfer->len;
325		st->tx_buf = xfer->tx_buf;
326	} else {
327		st->tx_buf = NULL;
328	}
329}
330
331static void spi_engine_rx_next(struct spi_message *msg)
332{
333	struct spi_engine_message_state *st = msg->state;
334	struct spi_transfer *xfer = st->rx_xfer;
335
336	do {
337		spi_engine_xfer_next(msg, &xfer);
338	} while (xfer && !xfer->rx_buf);
339
340	st->rx_xfer = xfer;
341	if (xfer) {
342		st->rx_length = xfer->len;
343		st->rx_buf = xfer->rx_buf;
344	} else {
345		st->rx_buf = NULL;
346	}
347}
348
349static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
350				      struct spi_message *msg)
351{
352	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
353	struct spi_engine_message_state *st = msg->state;
354	unsigned int n, m, i;
355	const uint16_t *buf;
356
357	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
358	while (n && st->cmd_length) {
359		m = min(n, st->cmd_length);
360		buf = st->cmd_buf;
361		for (i = 0; i < m; i++)
362			writel_relaxed(buf[i], addr);
363		st->cmd_buf += m;
364		st->cmd_length -= m;
365		n -= m;
366	}
367
368	return st->cmd_length != 0;
369}
370
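/*
 * FIFO accesses are sized by the transfer's bits_per_word: each 8-, 16-
 * or 32-bit SPI word of tx_buf is written to the data FIFO with a single
 * 32-bit register access.
 */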
371static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
372				     struct spi_message *msg)
373{
374	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
375	struct spi_engine_message_state *st = msg->state;
376	unsigned int n, m, i;
377
378	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
379	while (n && st->tx_length) {
380		if (st->tx_xfer->bits_per_word <= 8) {
381			const u8 *buf = st->tx_buf;
382
383			m = min(n, st->tx_length);
384			for (i = 0; i < m; i++)
385				writel_relaxed(buf[i], addr);
386			st->tx_buf += m;
387			st->tx_length -= m;
388		} else if (st->tx_xfer->bits_per_word <= 16) {
389			const u16 *buf = (const u16 *)st->tx_buf;
390
391			m = min(n, st->tx_length / 2);
392			for (i = 0; i < m; i++)
393				writel_relaxed(buf[i], addr);
394			st->tx_buf += m * 2;
395			st->tx_length -= m * 2;
396		} else {
397			const u32 *buf = (const u32 *)st->tx_buf;
398
399			m = min(n, st->tx_length / 4);
400			for (i = 0; i < m; i++)
401				writel_relaxed(buf[i], addr);
402			st->tx_buf += m * 4;
403			st->tx_length -= m * 4;
404		}
405		n -= m;
406		if (st->tx_length == 0)
407			spi_engine_tx_next(msg);
408	}
409
410	return st->tx_length != 0;
411}
412
413static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
414				    struct spi_message *msg)
415{
416	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
417	struct spi_engine_message_state *st = msg->state;
418	unsigned int n, m, i;
419
420	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
421	while (n && st->rx_length) {
422		if (st->rx_xfer->bits_per_word <= 8) {
423			u8 *buf = st->rx_buf;
424
425			m = min(n, st->rx_length);
426			for (i = 0; i < m; i++)
427				buf[i] = readl_relaxed(addr);
428			st->rx_buf += m;
429			st->rx_length -= m;
430		} else if (st->rx_xfer->bits_per_word <= 16) {
431			u16 *buf = (u16 *)st->rx_buf;
432
433			m = min(n, st->rx_length / 2);
434			for (i = 0; i < m; i++)
435				buf[i] = readl_relaxed(addr);
436			st->rx_buf += m * 2;
437			st->rx_length -= m * 2;
438		} else {
439			u32 *buf = (u32 *)st->rx_buf;
440
441			m = min(n, st->rx_length / 4);
442			for (i = 0; i < m; i++)
443				buf[i] = readl_relaxed(addr);
444			st->rx_buf += m * 4;
445			st->rx_length -= m * 4;
446		}
447		n -= m;
448		if (st->rx_length == 0)
449			spi_engine_rx_next(msg);
450	}
451
452	return st->rx_length != 0;
453}
454
455static irqreturn_t spi_engine_irq(int irq, void *devid)
456{
457	struct spi_controller *host = devid;
458	struct spi_message *msg = host->cur_msg;
459	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
460	unsigned int disable_int = 0;
461	unsigned int pending;
462	int completed_id = -1;
463
464	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
465
466	if (pending & SPI_ENGINE_INT_SYNC) {
467		writel_relaxed(SPI_ENGINE_INT_SYNC,
468			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
469		completed_id = readl_relaxed(
470			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
471	}
472
473	spin_lock(&spi_engine->lock);
474
475	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
476		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
477			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
478	}
479
480	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
481		if (!spi_engine_write_tx_fifo(spi_engine, msg))
482			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
483	}
484
485	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
486		if (!spi_engine_read_rx_fifo(spi_engine, msg))
487			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
488	}
489
490	if (pending & SPI_ENGINE_INT_SYNC && msg) {
491		struct spi_engine_message_state *st = msg->state;
492
493		if (completed_id == st->sync_id) {
494			if (timer_delete_sync(&spi_engine->watchdog_timer)) {
495				msg->status = 0;
496				msg->actual_length = msg->frame_length;
497				spi_finalize_current_message(host);
498			}
499			disable_int |= SPI_ENGINE_INT_SYNC;
500		}
501	}
502
503	if (disable_int) {
504		spi_engine->int_enable &= ~disable_int;
505		writel_relaxed(spi_engine->int_enable,
506			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
507	}
508
509	spin_unlock(&spi_engine->lock);
510
511	return IRQ_HANDLED;
512}
513
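/*
 * Per-message state is allocated here and attached via msg->state; the
 * sync id comes from an IDA so that each prepared message gets a unique
 * value within the 0-255 range supported by the SYNC instruction.
 */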
514static int spi_engine_prepare_message(struct spi_controller *host,
515				      struct spi_message *msg)
516{
517	struct spi_engine_program p_dry, *p;
518	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
519	struct spi_engine_message_state *st;
520	size_t size;
521	int ret;
522
523	st = kzalloc(sizeof(*st), GFP_KERNEL);
524	if (!st)
525		return -ENOMEM;
526
527	spi_engine_precompile_message(msg);
528
529	p_dry.length = 0;
530	spi_engine_compile_message(msg, true, &p_dry);
531
532	size = sizeof(*p->instructions) * (p_dry.length + 1);
533	p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
534	if (!p) {
535		kfree(st);
536		return -ENOMEM;
537	}
538
539	ret = ida_alloc_range(&spi_engine->sync_ida, 0, U8_MAX, GFP_KERNEL);
540	if (ret < 0) {
541		kfree(p);
542		kfree(st);
543		return ret;
544	}
545
546	st->sync_id = ret;
547
548	spi_engine_compile_message(msg, false, p);
549
550	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(st->sync_id));
551
552	st->p = p;
553	st->cmd_buf = p->instructions;
554	st->cmd_length = p->length;
555	msg->state = st;
556
557	return 0;
558}
559
560static int spi_engine_unprepare_message(struct spi_controller *host,
561					struct spi_message *msg)
562{
563	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
564	struct spi_engine_message_state *st = msg->state;
565
566	ida_free(&spi_engine->sync_ida, st->sync_id);
567	kfree(st->p);
568	kfree(st);
569
570	return 0;
571}
572
573static int spi_engine_transfer_one_message(struct spi_controller *host,
574	struct spi_message *msg)
575{
576	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
577	struct spi_engine_message_state *st = msg->state;
578	unsigned int int_enable = 0;
579	unsigned long flags;
580
581	mod_timer(&spi_engine->watchdog_timer, jiffies + msecs_to_jiffies(5000));
582
583	spin_lock_irqsave(&spi_engine->lock, flags);
584
585	if (spi_engine_write_cmd_fifo(spi_engine, msg))
586		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
587
588	spi_engine_tx_next(msg);
589	if (spi_engine_write_tx_fifo(spi_engine, msg))
590		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
591
592	spi_engine_rx_next(msg);
593	if (st->rx_length != 0)
594		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
595
596	int_enable |= SPI_ENGINE_INT_SYNC;
597
598	writel_relaxed(int_enable,
599		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
600	spi_engine->int_enable = int_enable;
601	spin_unlock_irqrestore(&spi_engine->lock, flags);
602
603	return 0;
604}
605
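/*
 * Watchdog callback: transfer_one_message() arms a 5 second timer; if no
 * matching SYNC interrupt deletes it in time, the in-flight message is
 * failed with -ETIMEDOUT instead of hanging forever.
 */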
606static void spi_engine_timeout(struct timer_list *timer)
607{
608	struct spi_engine *spi_engine = from_timer(spi_engine, timer, watchdog_timer);
609	struct spi_controller *host = spi_engine->controller;
610
611	if (WARN_ON(!host->cur_msg))
612		return;
613
614	dev_err(&host->dev,
615		"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
616	host->cur_msg->status = -ETIMEDOUT;
617	spi_finalize_current_message(host);
618}
619
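/*
 * Registered with devm_add_action_or_reset(): masks and clears all
 * interrupts and asserts the reset bit, both on probe failure and when
 * the driver is unbound, which replaces the explicit remove callback used
 * in the v5.9 version above.
 */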
620static void spi_engine_release_hw(void *p)
621{
622	struct spi_engine *spi_engine = p;
623
624	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
625	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
626	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
627}
628
629static int spi_engine_probe(struct platform_device *pdev)
630{
631	struct spi_engine *spi_engine;
632	struct spi_controller *host;
633	unsigned int version;
634	int irq;
635	int ret;
636
637	irq = platform_get_irq(pdev, 0);
638	if (irq < 0)
639		return irq;
640
641	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
642	if (!host)
643		return -ENOMEM;
644
645	spi_engine = spi_controller_get_devdata(host);
646
647	spin_lock_init(&spi_engine->lock);
648	ida_init(&spi_engine->sync_ida);
649	timer_setup(&spi_engine->watchdog_timer, spi_engine_timeout, TIMER_IRQSAFE);
650	spi_engine->controller = host;
651
652	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
653	if (IS_ERR(spi_engine->clk))
654		return PTR_ERR(spi_engine->clk);
655
656	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
657	if (IS_ERR(spi_engine->ref_clk))
658		return PTR_ERR(spi_engine->ref_clk);
659
660	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
661	if (IS_ERR(spi_engine->base))
662		return PTR_ERR(spi_engine->base);
663
664	version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
665	if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
666		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
667			SPI_ENGINE_VERSION_MAJOR(version),
668			SPI_ENGINE_VERSION_MINOR(version),
669			SPI_ENGINE_VERSION_PATCH(version));
670		return -ENODEV;
671	}
672
673	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
674	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
675	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
676
677	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
678				       spi_engine);
679	if (ret)
680		return ret;
681
682	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
683			       host);
684	if (ret)
685		return ret;
686
687	host->dev.of_node = pdev->dev.of_node;
688	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
689	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
690	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
691	host->transfer_one_message = spi_engine_transfer_one_message;
692	host->prepare_message = spi_engine_prepare_message;
693	host->unprepare_message = spi_engine_unprepare_message;
694	host->num_chipselect = 8;
695
696	if (host->max_speed_hz == 0)
697		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");
698
699	ret = devm_spi_register_controller(&pdev->dev, host);
700	if (ret)
701		return ret;
702
703	platform_set_drvdata(pdev, host);
704
705	return 0;
706}
707
708static const struct of_device_id spi_engine_match_table[] = {
709	{ .compatible = "adi,axi-spi-engine-1.00.a" },
710	{ },
711};
712MODULE_DEVICE_TABLE(of, spi_engine_match_table);
713
714static struct platform_driver spi_engine_driver = {
715	.probe = spi_engine_probe,
716	.driver = {
717		.name = "spi-engine",
718		.of_match_table = spi_engine_match_table,
719	},
720};
721module_platform_driver(spi_engine_driver);
722
723MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
724MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
725MODULE_LICENSE("GPL");