v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * TI QSPI driver
  4 *
  5 * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
  6 * Author: Sourav Poddar <sourav.poddar@ti.com>
  7 */
  8
  9#include <linux/kernel.h>
 10#include <linux/init.h>
 11#include <linux/interrupt.h>
 12#include <linux/module.h>
 13#include <linux/device.h>
 14#include <linux/delay.h>
 15#include <linux/dma-mapping.h>
 16#include <linux/dmaengine.h>
 17#include <linux/omap-dma.h>
 18#include <linux/platform_device.h>
 19#include <linux/err.h>
 20#include <linux/clk.h>
 21#include <linux/io.h>
 22#include <linux/slab.h>
 23#include <linux/pm_runtime.h>
 24#include <linux/of.h>
 25#include <linux/of_device.h>
 26#include <linux/pinctrl/consumer.h>
 27#include <linux/mfd/syscon.h>
 28#include <linux/regmap.h>
 29#include <linux/sizes.h>
 30
 31#include <linux/spi/spi.h>
 32#include <linux/spi/spi-mem.h>
 33
 34struct ti_qspi_regs {
 35	u32 clkctrl;
 36};
 37
 38struct ti_qspi {
 39	struct completion	transfer_complete;
 40
 41	/* list synchronization */
 42	struct mutex            list_lock;
 43
 44	struct spi_master	*master;
 45	void __iomem            *base;
 46	void __iomem            *mmap_base;
 47	size_t			mmap_size;
 48	struct regmap		*ctrl_base;
 49	unsigned int		ctrl_reg;
 50	struct clk		*fclk;
 51	struct device           *dev;
 52
 53	struct ti_qspi_regs     ctx_reg;
 54
 55	dma_addr_t		mmap_phys_base;
 56	dma_addr_t		rx_bb_dma_addr;
 57	void			*rx_bb_addr;
 58	struct dma_chan		*rx_chan;
 59
 60	u32 spi_max_frequency;
 61	u32 cmd;
 62	u32 dc;
 63
 64	bool mmap_enabled;
 65	int current_cs;
 66};
 67
 68#define QSPI_PID			(0x0)
 69#define QSPI_SYSCONFIG			(0x10)
 70#define QSPI_SPI_CLOCK_CNTRL_REG	(0x40)
 71#define QSPI_SPI_DC_REG			(0x44)
 72#define QSPI_SPI_CMD_REG		(0x48)
 73#define QSPI_SPI_STATUS_REG		(0x4c)
 74#define QSPI_SPI_DATA_REG		(0x50)
 75#define QSPI_SPI_SETUP_REG(n)		((0x54 + 4 * n))
 76#define QSPI_SPI_SWITCH_REG		(0x64)
 77#define QSPI_SPI_DATA_REG_1		(0x68)
 78#define QSPI_SPI_DATA_REG_2		(0x6c)
 79#define QSPI_SPI_DATA_REG_3		(0x70)
 80
 81#define QSPI_COMPLETION_TIMEOUT		msecs_to_jiffies(2000)
 82
 83/* Clock Control */
 84#define QSPI_CLK_EN			(1 << 31)
 85#define QSPI_CLK_DIV_MAX		0xffff
 86
 87/* Command */
 88#define QSPI_EN_CS(n)			(n << 28)
 89#define QSPI_WLEN(n)			((n - 1) << 19)
 90#define QSPI_3_PIN			(1 << 18)
 91#define QSPI_RD_SNGL			(1 << 16)
 92#define QSPI_WR_SNGL			(2 << 16)
 93#define QSPI_RD_DUAL			(3 << 16)
 94#define QSPI_RD_QUAD			(7 << 16)
 95#define QSPI_INVAL			(4 << 16)
 96#define QSPI_FLEN(n)			((n - 1) << 0)
 97#define QSPI_WLEN_MAX_BITS		128
 98#define QSPI_WLEN_MAX_BYTES		16
 99#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)
100
101/* STATUS REGISTER */
102#define BUSY				0x01
103#define WC				0x02
104
105/* Device Control */
106#define QSPI_DD(m, n)			(m << (3 + n * 8))
107#define QSPI_CKPHA(n)			(1 << (2 + n * 8))
108#define QSPI_CSPOL(n)			(1 << (1 + n * 8))
109#define QSPI_CKPOL(n)			(1 << (n * 8))
110
111#define	QSPI_FRAME			4096
112
113#define QSPI_AUTOSUSPEND_TIMEOUT         2000
114
115#define MEM_CS_EN(n)			((n + 1) << 8)
116#define MEM_CS_MASK			(7 << 8)
117
118#define MM_SWITCH			0x1
119
120#define QSPI_SETUP_RD_NORMAL		(0x0 << 12)
121#define QSPI_SETUP_RD_DUAL		(0x1 << 12)
122#define QSPI_SETUP_RD_QUAD		(0x3 << 12)
123#define QSPI_SETUP_ADDR_SHIFT		8
124#define QSPI_SETUP_DUMMY_SHIFT		10
125
126#define QSPI_DMA_BUFFER_SIZE            SZ_64K
127
128static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
129		unsigned long reg)
130{
131	return readl(qspi->base + reg);
132}
133
134static inline void ti_qspi_write(struct ti_qspi *qspi,
135		unsigned long val, unsigned long reg)
136{
137	writel(val, qspi->base + reg);
138}
139
140static int ti_qspi_setup(struct spi_device *spi)
141{
142	struct ti_qspi	*qspi = spi_master_get_devdata(spi->master);
143	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
144	int clk_div = 0, ret;
145	u32 clk_ctrl_reg, clk_rate, clk_mask;
146
147	if (spi->master->busy) {
148		dev_dbg(qspi->dev, "master busy doing other transfers\n");
149		return -EBUSY;
150	}
151
152	if (!qspi->spi_max_frequency) {
153		dev_err(qspi->dev, "spi max frequency not defined\n");
154		return -EINVAL;
155	}
156
157	clk_rate = clk_get_rate(qspi->fclk);
158
159	clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1;
160
161	if (clk_div < 0) {
162		dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n");
163		return -EINVAL;
164	}
165
166	if (clk_div > QSPI_CLK_DIV_MAX) {
167		dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n",
168				QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1);
169		return -EINVAL;
170	}
171
172	dev_dbg(qspi->dev, "hz: %d, clock divider %d\n",
173			qspi->spi_max_frequency, clk_div);
174
175	ret = pm_runtime_get_sync(qspi->dev);
176	if (ret < 0) {
177		dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
178		return ret;
179	}
180
181	clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
182
183	clk_ctrl_reg &= ~QSPI_CLK_EN;
184
185	/* disable SCLK */
186	ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
187
188	/* enable SCLK */
189	clk_mask = QSPI_CLK_EN | clk_div;
190	ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG);
191	ctx_reg->clkctrl = clk_mask;
192
193	pm_runtime_mark_last_busy(qspi->dev);
194	ret = pm_runtime_put_autosuspend(qspi->dev);
195	if (ret < 0) {
196		dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
197		return ret;
198	}
199
200	return 0;
201}
202
203static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
204{
205	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
206
207	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
208}
209
210static inline u32 qspi_is_busy(struct ti_qspi *qspi)
211{
212	u32 stat;
213	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
214
215	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
216	while ((stat & BUSY) && time_after(timeout, jiffies)) {
217		cpu_relax();
218		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
219	}
220
221	WARN(stat & BUSY, "qspi busy\n");
222	return stat & BUSY;
223}
224
225static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
226{
227	u32 stat;
228	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
229
230	do {
231		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
232		if (stat & WC)
233			return 0;
234		cpu_relax();
235	} while (time_after(timeout, jiffies));
236
237	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
238	if (stat & WC)
239		return 0;
240	return  -ETIMEDOUT;
241}
242
243static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
244			  int count)
245{
246	int wlen, xfer_len;
247	unsigned int cmd;
248	const u8 *txbuf;
249	u32 data;
250
251	txbuf = t->tx_buf;
252	cmd = qspi->cmd | QSPI_WR_SNGL;
253	wlen = t->bits_per_word >> 3;	/* in bytes */
254	xfer_len = wlen;
255
256	while (count) {
257		if (qspi_is_busy(qspi))
258			return -EBUSY;
259
260		switch (wlen) {
261		case 1:
262			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
263					cmd, qspi->dc, *txbuf);
264			if (count >= QSPI_WLEN_MAX_BYTES) {
265				u32 *txp = (u32 *)txbuf;
266
267				data = cpu_to_be32(*txp++);
268				writel(data, qspi->base +
269				       QSPI_SPI_DATA_REG_3);
270				data = cpu_to_be32(*txp++);
271				writel(data, qspi->base +
272				       QSPI_SPI_DATA_REG_2);
273				data = cpu_to_be32(*txp++);
274				writel(data, qspi->base +
275				       QSPI_SPI_DATA_REG_1);
276				data = cpu_to_be32(*txp++);
277				writel(data, qspi->base +
278				       QSPI_SPI_DATA_REG);
279				xfer_len = QSPI_WLEN_MAX_BYTES;
280				cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
281			} else {
282				writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
283				cmd = qspi->cmd | QSPI_WR_SNGL;
284				xfer_len = wlen;
285				cmd |= QSPI_WLEN(wlen);
286			}
287			break;
288		case 2:
289			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
290					cmd, qspi->dc, *txbuf);
291			writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
292			break;
293		case 4:
294			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
295					cmd, qspi->dc, *txbuf);
296			writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
297			break;
298		}
299
300		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
301		if (ti_qspi_poll_wc(qspi)) {
302			dev_err(qspi->dev, "write timed out\n");
303			return -ETIMEDOUT;
304		}
305		txbuf += xfer_len;
306		count -= xfer_len;
307	}
308
309	return 0;
310}
311
312static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
313			 int count)
314{
315	int wlen;
316	unsigned int cmd;
317	u32 rx;
318	u8 rxlen, rx_wlen;
319	u8 *rxbuf;
320
321	rxbuf = t->rx_buf;
322	cmd = qspi->cmd;
323	switch (t->rx_nbits) {
324	case SPI_NBITS_DUAL:
325		cmd |= QSPI_RD_DUAL;
326		break;
327	case SPI_NBITS_QUAD:
328		cmd |= QSPI_RD_QUAD;
329		break;
330	default:
331		cmd |= QSPI_RD_SNGL;
332		break;
333	}
334	wlen = t->bits_per_word >> 3;	/* in bytes */
335	rx_wlen = wlen;
336
337	while (count) {
338		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
339		if (qspi_is_busy(qspi))
340			return -EBUSY;
341
342		switch (wlen) {
343		case 1:
344			/*
 345			 * Optimize 8-bit word transfers, as used by
 346			 * SPI flash devices.
347			 */
348			if (count >= QSPI_WLEN_MAX_BYTES) {
349				rxlen = QSPI_WLEN_MAX_BYTES;
350			} else {
351				rxlen = min(count, 4);
352			}
353			rx_wlen = rxlen << 3;
354			cmd &= ~QSPI_WLEN_MASK;
355			cmd |= QSPI_WLEN(rx_wlen);
356			break;
357		default:
358			rxlen = wlen;
359			break;
360		}
361
362		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
363		if (ti_qspi_poll_wc(qspi)) {
364			dev_err(qspi->dev, "read timed out\n");
365			return -ETIMEDOUT;
366		}
367
368		switch (wlen) {
369		case 1:
370			/*
 371			 * Optimize 8-bit word transfers, as used by
 372			 * SPI flash devices.
373			 */
374			if (count >= QSPI_WLEN_MAX_BYTES) {
375				u32 *rxp = (u32 *) rxbuf;
376				rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
377				*rxp++ = be32_to_cpu(rx);
378				rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
379				*rxp++ = be32_to_cpu(rx);
380				rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
381				*rxp++ = be32_to_cpu(rx);
382				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
383				*rxp++ = be32_to_cpu(rx);
384			} else {
385				u8 *rxp = rxbuf;
386				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
387				if (rx_wlen >= 8)
388					*rxp++ = rx >> (rx_wlen - 8);
389				if (rx_wlen >= 16)
390					*rxp++ = rx >> (rx_wlen - 16);
391				if (rx_wlen >= 24)
392					*rxp++ = rx >> (rx_wlen - 24);
393				if (rx_wlen >= 32)
394					*rxp++ = rx;
395			}
396			break;
397		case 2:
398			*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
399			break;
400		case 4:
401			*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
402			break;
403		}
404		rxbuf += rxlen;
405		count -= rxlen;
406	}
407
408	return 0;
409}
410
411static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
412			     int count)
413{
414	int ret;
415
416	if (t->tx_buf) {
417		ret = qspi_write_msg(qspi, t, count);
418		if (ret) {
419			dev_dbg(qspi->dev, "Error while writing\n");
420			return ret;
421		}
422	}
423
424	if (t->rx_buf) {
425		ret = qspi_read_msg(qspi, t, count);
426		if (ret) {
427			dev_dbg(qspi->dev, "Error while reading\n");
428			return ret;
429		}
430	}
431
432	return 0;
433}
434
435static void ti_qspi_dma_callback(void *param)
436{
437	struct ti_qspi *qspi = param;
438
439	complete(&qspi->transfer_complete);
440}
441
442static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
443			    dma_addr_t dma_src, size_t len)
444{
445	struct dma_chan *chan = qspi->rx_chan;
446	dma_cookie_t cookie;
447	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
448	struct dma_async_tx_descriptor *tx;
449	int ret;
450
451	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
452	if (!tx) {
453		dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
454		return -EIO;
455	}
456
457	tx->callback = ti_qspi_dma_callback;
458	tx->callback_param = qspi;
459	cookie = tx->tx_submit(tx);
460	reinit_completion(&qspi->transfer_complete);
461
462	ret = dma_submit_error(cookie);
463	if (ret) {
464		dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
465		return -EIO;
466	}
467
468	dma_async_issue_pending(chan);
469	ret = wait_for_completion_timeout(&qspi->transfer_complete,
470					  msecs_to_jiffies(len));
471	if (ret <= 0) {
472		dmaengine_terminate_sync(chan);
473		dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
474		return -ETIMEDOUT;
475	}
476
477	return 0;
478}
479
480static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
481				     void *to, size_t readsize)
482{
483	dma_addr_t dma_src = qspi->mmap_phys_base + offs;
484	int ret = 0;
485
486	/*
 487	 * Use a bounce buffer, as filesystems like jffs2 and ubifs may
 488	 * pass buffers that do not belong to the kernel lowmem region.
489	 */
490	while (readsize != 0) {
491		size_t xfer_len = min_t(size_t, QSPI_DMA_BUFFER_SIZE,
492					readsize);
493
494		ret = ti_qspi_dma_xfer(qspi, qspi->rx_bb_dma_addr,
495				       dma_src, xfer_len);
496		if (ret != 0)
497			return ret;
498		memcpy(to, qspi->rx_bb_addr, xfer_len);
499		readsize -= xfer_len;
500		dma_src += xfer_len;
501		to += xfer_len;
502	}
503
504	return ret;
505}
506
507static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
508			       loff_t from)
509{
510	struct scatterlist *sg;
511	dma_addr_t dma_src = qspi->mmap_phys_base + from;
512	dma_addr_t dma_dst;
513	int i, len, ret;
514
515	for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
516		dma_dst = sg_dma_address(sg);
517		len = sg_dma_len(sg);
518		ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
519		if (ret)
520			return ret;
521		dma_src += len;
522	}
523
524	return 0;
525}
526
527static void ti_qspi_enable_memory_map(struct spi_device *spi)
528{
529	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
530
531	ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
532	if (qspi->ctrl_base) {
533		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
534				   MEM_CS_MASK,
535				   MEM_CS_EN(spi->chip_select));
536	}
537	qspi->mmap_enabled = true;
538	qspi->current_cs = spi->chip_select;
539}
540
541static void ti_qspi_disable_memory_map(struct spi_device *spi)
542{
543	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
544
545	ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
546	if (qspi->ctrl_base)
547		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
548				   MEM_CS_MASK, 0);
549	qspi->mmap_enabled = false;
550	qspi->current_cs = -1;
551}
552
553static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
554				    u8 data_nbits, u8 addr_width,
555				    u8 dummy_bytes)
556{
557	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
558	u32 memval = opcode;
559
560	switch (data_nbits) {
561	case SPI_NBITS_QUAD:
562		memval |= QSPI_SETUP_RD_QUAD;
563		break;
564	case SPI_NBITS_DUAL:
565		memval |= QSPI_SETUP_RD_DUAL;
566		break;
567	default:
568		memval |= QSPI_SETUP_RD_NORMAL;
569		break;
570	}
571	memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
572		   dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
573	ti_qspi_write(qspi, memval,
574		      QSPI_SPI_SETUP_REG(spi->chip_select));
575}
576
577static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
578{
579	struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
580	size_t max_len;
581
582	if (op->data.dir == SPI_MEM_DATA_IN) {
583		if (op->addr.val < qspi->mmap_size) {
584			/* Limit MMIO to the mmaped region */
585			if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
586				max_len = qspi->mmap_size - op->addr.val;
587				op->data.nbytes = min((size_t) op->data.nbytes,
588						      max_len);
589			}
590		} else {
591			/*
592			 * Use fallback mode (SW generated transfers) above the
593			 * mmaped region.
594			 * Adjust size to comply with the QSPI max frame length.
595			 */
596			max_len = QSPI_FRAME;
597			max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
598			op->data.nbytes = min((size_t) op->data.nbytes,
599					      max_len);
600		}
601	}
602
603	return 0;
604}
605
606static int ti_qspi_exec_mem_op(struct spi_mem *mem,
607			       const struct spi_mem_op *op)
608{
609	struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
610	u32 from = 0;
611	int ret = 0;
612
613	/* Only optimize read path. */
614	if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
615	    !op->addr.nbytes || op->addr.nbytes > 4)
616		return -ENOTSUPP;
617
618	/* Address exceeds MMIO window size, fall back to regular mode. */
619	from = op->addr.val;
620	if (from + op->data.nbytes > qspi->mmap_size)
621		return -ENOTSUPP;
622
623	mutex_lock(&qspi->list_lock);
624
625	if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select)
626		ti_qspi_enable_memory_map(mem->spi);
627	ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
628				op->addr.nbytes, op->dummy.nbytes);
629
630	if (qspi->rx_chan) {
631		struct sg_table sgt;
632
633		if (virt_addr_valid(op->data.buf.in) &&
634		    !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
635							&sgt)) {
636			ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
637			spi_controller_dma_unmap_mem_op_data(mem->spi->master,
638							     op, &sgt);
639		} else {
640			ret = ti_qspi_dma_bounce_buffer(qspi, from,
641							op->data.buf.in,
642							op->data.nbytes);
643		}
644	} else {
645		memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
646			      op->data.nbytes);
647	}
648
649	mutex_unlock(&qspi->list_lock);
650
651	return ret;
652}
653
654static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
655	.exec_op = ti_qspi_exec_mem_op,
656	.adjust_op_size = ti_qspi_adjust_op_size,
657};
658
659static int ti_qspi_start_transfer_one(struct spi_master *master,
660		struct spi_message *m)
661{
662	struct ti_qspi *qspi = spi_master_get_devdata(master);
663	struct spi_device *spi = m->spi;
664	struct spi_transfer *t;
665	int status = 0, ret;
666	unsigned int frame_len_words, transfer_len_words;
667	int wlen;
668
669	/* setup device control reg */
670	qspi->dc = 0;
671
672	if (spi->mode & SPI_CPHA)
673		qspi->dc |= QSPI_CKPHA(spi->chip_select);
674	if (spi->mode & SPI_CPOL)
675		qspi->dc |= QSPI_CKPOL(spi->chip_select);
676	if (spi->mode & SPI_CS_HIGH)
677		qspi->dc |= QSPI_CSPOL(spi->chip_select);
678
679	frame_len_words = 0;
680	list_for_each_entry(t, &m->transfers, transfer_list)
681		frame_len_words += t->len / (t->bits_per_word >> 3);
682	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
683
684	/* setup command reg */
685	qspi->cmd = 0;
686	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
687	qspi->cmd |= QSPI_FLEN(frame_len_words);
688
689	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
690
691	mutex_lock(&qspi->list_lock);
692
693	if (qspi->mmap_enabled)
694		ti_qspi_disable_memory_map(spi);
695
696	list_for_each_entry(t, &m->transfers, transfer_list) {
697		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
698			     QSPI_WLEN(t->bits_per_word));
699
700		wlen = t->bits_per_word >> 3;
701		transfer_len_words = min(t->len / wlen, frame_len_words);
702
703		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
704		if (ret) {
705			dev_dbg(qspi->dev, "transfer message failed\n");
706			mutex_unlock(&qspi->list_lock);
707			return -EINVAL;
708		}
709
710		m->actual_length += transfer_len_words * wlen;
711		frame_len_words -= transfer_len_words;
712		if (frame_len_words == 0)
713			break;
714	}
715
716	mutex_unlock(&qspi->list_lock);
717
718	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
719	m->status = status;
720	spi_finalize_current_message(master);
721
722	return status;
723}
724
725static int ti_qspi_runtime_resume(struct device *dev)
726{
727	struct ti_qspi      *qspi;
728
729	qspi = dev_get_drvdata(dev);
730	ti_qspi_restore_ctx(qspi);
731
732	return 0;
733}
734
735static const struct of_device_id ti_qspi_match[] = {
736	{.compatible = "ti,dra7xxx-qspi" },
737	{.compatible = "ti,am4372-qspi" },
738	{},
739};
740MODULE_DEVICE_TABLE(of, ti_qspi_match);
741
742static int ti_qspi_probe(struct platform_device *pdev)
743{
744	struct  ti_qspi *qspi;
745	struct spi_master *master;
746	struct resource         *r, *res_mmap;
747	struct device_node *np = pdev->dev.of_node;
748	u32 max_freq;
749	int ret = 0, num_cs, irq;
750	dma_cap_mask_t mask;
751
752	master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
753	if (!master)
754		return -ENOMEM;
755
756	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
757
758	master->flags = SPI_MASTER_HALF_DUPLEX;
759	master->setup = ti_qspi_setup;
760	master->auto_runtime_pm = true;
761	master->transfer_one_message = ti_qspi_start_transfer_one;
762	master->dev.of_node = pdev->dev.of_node;
763	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
764				     SPI_BPW_MASK(8);
765	master->mem_ops = &ti_qspi_mem_ops;
766
767	if (!of_property_read_u32(np, "num-cs", &num_cs))
768		master->num_chipselect = num_cs;
769
770	qspi = spi_master_get_devdata(master);
771	qspi->master = master;
772	qspi->dev = &pdev->dev;
773	platform_set_drvdata(pdev, qspi);
774
775	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
776	if (r == NULL) {
777		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
778		if (r == NULL) {
779			dev_err(&pdev->dev, "missing platform data\n");
780			ret = -ENODEV;
781			goto free_master;
782		}
783	}
784
785	res_mmap = platform_get_resource_byname(pdev,
786			IORESOURCE_MEM, "qspi_mmap");
787	if (res_mmap == NULL) {
788		res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
789		if (res_mmap == NULL) {
790			dev_err(&pdev->dev,
791				"memory mapped resource not required\n");
792		}
793	}
794
795	if (res_mmap)
796		qspi->mmap_size = resource_size(res_mmap);
797
798	irq = platform_get_irq(pdev, 0);
799	if (irq < 0) {
800		ret = irq;
801		goto free_master;
802	}
803
804	mutex_init(&qspi->list_lock);
805
806	qspi->base = devm_ioremap_resource(&pdev->dev, r);
807	if (IS_ERR(qspi->base)) {
808		ret = PTR_ERR(qspi->base);
809		goto free_master;
810	}
811
812
813	if (of_property_read_bool(np, "syscon-chipselects")) {
814		qspi->ctrl_base =
815		syscon_regmap_lookup_by_phandle(np,
816						"syscon-chipselects");
817		if (IS_ERR(qspi->ctrl_base)) {
818			ret = PTR_ERR(qspi->ctrl_base);
819			goto free_master;
820		}
821		ret = of_property_read_u32_index(np,
822						 "syscon-chipselects",
823						 1, &qspi->ctrl_reg);
824		if (ret) {
825			dev_err(&pdev->dev,
826				"couldn't get ctrl_mod reg index\n");
827			goto free_master;
828		}
829	}
830
831	qspi->fclk = devm_clk_get(&pdev->dev, "fck");
832	if (IS_ERR(qspi->fclk)) {
833		ret = PTR_ERR(qspi->fclk);
834		dev_err(&pdev->dev, "could not get clk: %d\n", ret);
835	}
836
837	pm_runtime_use_autosuspend(&pdev->dev);
838	pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
839	pm_runtime_enable(&pdev->dev);
840
841	if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
842		qspi->spi_max_frequency = max_freq;
843
844	dma_cap_zero(mask);
845	dma_cap_set(DMA_MEMCPY, mask);
846
847	qspi->rx_chan = dma_request_chan_by_mask(&mask);
848	if (IS_ERR(qspi->rx_chan)) {
849		dev_err(qspi->dev,
850			"No Rx DMA available, trying mmap mode\n");
851		qspi->rx_chan = NULL;
852		ret = 0;
853		goto no_dma;
854	}
855	qspi->rx_bb_addr = dma_alloc_coherent(qspi->dev,
856					      QSPI_DMA_BUFFER_SIZE,
857					      &qspi->rx_bb_dma_addr,
858					      GFP_KERNEL | GFP_DMA);
859	if (!qspi->rx_bb_addr) {
860		dev_err(qspi->dev,
861			"dma_alloc_coherent failed, using PIO mode\n");
862		dma_release_channel(qspi->rx_chan);
863		goto no_dma;
864	}
865	master->dma_rx = qspi->rx_chan;
866	init_completion(&qspi->transfer_complete);
867	if (res_mmap)
868		qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;
869
870no_dma:
871	if (!qspi->rx_chan && res_mmap) {
872		qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
873		if (IS_ERR(qspi->mmap_base)) {
874			dev_info(&pdev->dev,
875				 "mmap failed with error %ld using PIO mode\n",
876				 PTR_ERR(qspi->mmap_base));
877			qspi->mmap_base = NULL;
878			master->mem_ops = NULL;
879		}
880	}
881	qspi->mmap_enabled = false;
882	qspi->current_cs = -1;
883
884	ret = devm_spi_register_master(&pdev->dev, master);
885	if (!ret)
886		return 0;
887
888	pm_runtime_disable(&pdev->dev);
889free_master:
890	spi_master_put(master);
891	return ret;
892}
893
894static int ti_qspi_remove(struct platform_device *pdev)
895{
896	struct ti_qspi *qspi = platform_get_drvdata(pdev);
897	int rc;
898
899	rc = spi_master_suspend(qspi->master);
900	if (rc)
901		return rc;
902
903	pm_runtime_put_sync(&pdev->dev);
904	pm_runtime_disable(&pdev->dev);
905
906	if (qspi->rx_bb_addr)
907		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
908				  qspi->rx_bb_addr,
909				  qspi->rx_bb_dma_addr);
910	if (qspi->rx_chan)
911		dma_release_channel(qspi->rx_chan);
912
913	return 0;
914}
915
916static const struct dev_pm_ops ti_qspi_pm_ops = {
917	.runtime_resume = ti_qspi_runtime_resume,
918};
919
920static struct platform_driver ti_qspi_driver = {
921	.probe	= ti_qspi_probe,
922	.remove = ti_qspi_remove,
923	.driver = {
924		.name	= "ti-qspi",
925		.pm =   &ti_qspi_pm_ops,
926		.of_match_table = ti_qspi_match,
927	}
928};
929
930module_platform_driver(ti_qspi_driver);
931
932MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
933MODULE_LICENSE("GPL v2");
934MODULE_DESCRIPTION("TI QSPI controller driver");
935MODULE_ALIAS("platform:ti-qspi");
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * TI QSPI driver
  4 *
  5 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
  6 * Author: Sourav Poddar <sourav.poddar@ti.com>
  7 */
  8
  9#include <linux/kernel.h>
 10#include <linux/init.h>
 11#include <linux/interrupt.h>
 12#include <linux/module.h>
 13#include <linux/device.h>
 14#include <linux/delay.h>
 15#include <linux/dma-mapping.h>
 16#include <linux/dmaengine.h>
 17#include <linux/omap-dma.h>
 18#include <linux/platform_device.h>
 19#include <linux/err.h>
 20#include <linux/clk.h>
 21#include <linux/io.h>
 22#include <linux/slab.h>
 23#include <linux/pm_runtime.h>
 24#include <linux/of.h>
 25#include <linux/of_device.h>
 26#include <linux/pinctrl/consumer.h>
 27#include <linux/mfd/syscon.h>
 28#include <linux/regmap.h>
 29#include <linux/sizes.h>
 30
 31#include <linux/spi/spi.h>
 32#include <linux/spi/spi-mem.h>
 33
 34struct ti_qspi_regs {
 35	u32 clkctrl;
 36};
 37
 38struct ti_qspi {
 39	struct completion	transfer_complete;
 40
 41	/* list synchronization */
 42	struct mutex            list_lock;
 43
 44	struct spi_master	*master;
 45	void __iomem            *base;
 46	void __iomem            *mmap_base;
 47	size_t			mmap_size;
 48	struct regmap		*ctrl_base;
 49	unsigned int		ctrl_reg;
 50	struct clk		*fclk;
 51	struct device           *dev;
 52
 53	struct ti_qspi_regs     ctx_reg;
 54
 55	dma_addr_t		mmap_phys_base;
 56	dma_addr_t		rx_bb_dma_addr;
 57	void			*rx_bb_addr;
 58	struct dma_chan		*rx_chan;
 59
 60	u32 spi_max_frequency;
 61	u32 cmd;
 62	u32 dc;
 63
 64	bool mmap_enabled;
 65};
 66
 67#define QSPI_PID			(0x0)
 68#define QSPI_SYSCONFIG			(0x10)
 69#define QSPI_SPI_CLOCK_CNTRL_REG	(0x40)
 70#define QSPI_SPI_DC_REG			(0x44)
 71#define QSPI_SPI_CMD_REG		(0x48)
 72#define QSPI_SPI_STATUS_REG		(0x4c)
 73#define QSPI_SPI_DATA_REG		(0x50)
 74#define QSPI_SPI_SETUP_REG(n)		((0x54 + 4 * n))
 75#define QSPI_SPI_SWITCH_REG		(0x64)
 76#define QSPI_SPI_DATA_REG_1		(0x68)
 77#define QSPI_SPI_DATA_REG_2		(0x6c)
 78#define QSPI_SPI_DATA_REG_3		(0x70)
 79
 80#define QSPI_COMPLETION_TIMEOUT		msecs_to_jiffies(2000)
 81
 82#define QSPI_FCLK			192000000
 83
 84/* Clock Control */
 85#define QSPI_CLK_EN			(1 << 31)
 86#define QSPI_CLK_DIV_MAX		0xffff
 87
 88/* Command */
 89#define QSPI_EN_CS(n)			(n << 28)
 90#define QSPI_WLEN(n)			((n - 1) << 19)
 91#define QSPI_3_PIN			(1 << 18)
 92#define QSPI_RD_SNGL			(1 << 16)
 93#define QSPI_WR_SNGL			(2 << 16)
 94#define QSPI_RD_DUAL			(3 << 16)
 95#define QSPI_RD_QUAD			(7 << 16)
 96#define QSPI_INVAL			(4 << 16)
 97#define QSPI_FLEN(n)			((n - 1) << 0)
 98#define QSPI_WLEN_MAX_BITS		128
 99#define QSPI_WLEN_MAX_BYTES		16
100#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)
101
102/* STATUS REGISTER */
103#define BUSY				0x01
104#define WC				0x02
105
106/* Device Control */
107#define QSPI_DD(m, n)			(m << (3 + n * 8))
108#define QSPI_CKPHA(n)			(1 << (2 + n * 8))
109#define QSPI_CSPOL(n)			(1 << (1 + n * 8))
110#define QSPI_CKPOL(n)			(1 << (n * 8))
111
112#define	QSPI_FRAME			4096
113
114#define QSPI_AUTOSUSPEND_TIMEOUT         2000
115
116#define MEM_CS_EN(n)			((n + 1) << 8)
117#define MEM_CS_MASK			(7 << 8)
118
119#define MM_SWITCH			0x1
120
121#define QSPI_SETUP_RD_NORMAL		(0x0 << 12)
122#define QSPI_SETUP_RD_DUAL		(0x1 << 12)
123#define QSPI_SETUP_RD_QUAD		(0x3 << 12)
124#define QSPI_SETUP_ADDR_SHIFT		8
125#define QSPI_SETUP_DUMMY_SHIFT		10
126
127#define QSPI_DMA_BUFFER_SIZE            SZ_64K
128
129static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
130		unsigned long reg)
131{
132	return readl(qspi->base + reg);
133}
134
135static inline void ti_qspi_write(struct ti_qspi *qspi,
136		unsigned long val, unsigned long reg)
137{
138	writel(val, qspi->base + reg);
139}
140
141static int ti_qspi_setup(struct spi_device *spi)
142{
143	struct ti_qspi	*qspi = spi_master_get_devdata(spi->master);
144	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
145	int clk_div = 0, ret;
146	u32 clk_ctrl_reg, clk_rate, clk_mask;
147
148	if (spi->master->busy) {
149		dev_dbg(qspi->dev, "master busy doing other transfers\n");
150		return -EBUSY;
151	}
152
153	if (!qspi->spi_max_frequency) {
154		dev_err(qspi->dev, "spi max frequency not defined\n");
155		return -EINVAL;
156	}
157
158	clk_rate = clk_get_rate(qspi->fclk);
159
160	clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1;
161
162	if (clk_div < 0) {
163		dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n");
164		return -EINVAL;
165	}
166
167	if (clk_div > QSPI_CLK_DIV_MAX) {
168		dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n",
169				QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1);
170		return -EINVAL;
171	}
172
173	dev_dbg(qspi->dev, "hz: %d, clock divider %d\n",
174			qspi->spi_max_frequency, clk_div);
175
176	ret = pm_runtime_get_sync(qspi->dev);
177	if (ret < 0) {
178		dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
179		return ret;
180	}
181
182	clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
183
184	clk_ctrl_reg &= ~QSPI_CLK_EN;
185
186	/* disable SCLK */
187	ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
188
189	/* enable SCLK */
190	clk_mask = QSPI_CLK_EN | clk_div;
191	ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG);
192	ctx_reg->clkctrl = clk_mask;
193
194	pm_runtime_mark_last_busy(qspi->dev);
195	ret = pm_runtime_put_autosuspend(qspi->dev);
196	if (ret < 0) {
197		dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
198		return ret;
199	}
200
201	return 0;
202}
203
204static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
205{
206	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
207
208	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
209}
210
211static inline u32 qspi_is_busy(struct ti_qspi *qspi)
212{
213	u32 stat;
214	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
215
216	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
217	while ((stat & BUSY) && time_after(timeout, jiffies)) {
218		cpu_relax();
219		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
220	}
221
222	WARN(stat & BUSY, "qspi busy\n");
223	return stat & BUSY;
224}
225
226static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
227{
228	u32 stat;
229	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
230
231	do {
232		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
233		if (stat & WC)
234			return 0;
235		cpu_relax();
236	} while (time_after(timeout, jiffies));
237
238	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
239	if (stat & WC)
240		return 0;
241	return  -ETIMEDOUT;
242}
243
244static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
245			  int count)
246{
247	int wlen, xfer_len;
248	unsigned int cmd;
249	const u8 *txbuf;
250	u32 data;
251
252	txbuf = t->tx_buf;
253	cmd = qspi->cmd | QSPI_WR_SNGL;
254	wlen = t->bits_per_word >> 3;	/* in bytes */
255	xfer_len = wlen;
256
257	while (count) {
258		if (qspi_is_busy(qspi))
259			return -EBUSY;
260
261		switch (wlen) {
262		case 1:
263			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
264					cmd, qspi->dc, *txbuf);
265			if (count >= QSPI_WLEN_MAX_BYTES) {
266				u32 *txp = (u32 *)txbuf;
267
268				data = cpu_to_be32(*txp++);
269				writel(data, qspi->base +
270				       QSPI_SPI_DATA_REG_3);
271				data = cpu_to_be32(*txp++);
272				writel(data, qspi->base +
273				       QSPI_SPI_DATA_REG_2);
274				data = cpu_to_be32(*txp++);
275				writel(data, qspi->base +
276				       QSPI_SPI_DATA_REG_1);
277				data = cpu_to_be32(*txp++);
278				writel(data, qspi->base +
279				       QSPI_SPI_DATA_REG);
280				xfer_len = QSPI_WLEN_MAX_BYTES;
281				cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
282			} else {
283				writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
284				cmd = qspi->cmd | QSPI_WR_SNGL;
285				xfer_len = wlen;
286				cmd |= QSPI_WLEN(wlen);
287			}
288			break;
289		case 2:
290			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
291					cmd, qspi->dc, *txbuf);
292			writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
293			break;
294		case 4:
295			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
296					cmd, qspi->dc, *txbuf);
297			writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
298			break;
299		}
300
301		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
302		if (ti_qspi_poll_wc(qspi)) {
303			dev_err(qspi->dev, "write timed out\n");
304			return -ETIMEDOUT;
305		}
306		txbuf += xfer_len;
307		count -= xfer_len;
308	}
309
310	return 0;
311}
312
313static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
314			 int count)
315{
316	int wlen;
317	unsigned int cmd;
318	u8 *rxbuf;
319
320	rxbuf = t->rx_buf;
321	cmd = qspi->cmd;
322	switch (t->rx_nbits) {
323	case SPI_NBITS_DUAL:
324		cmd |= QSPI_RD_DUAL;
325		break;
326	case SPI_NBITS_QUAD:
327		cmd |= QSPI_RD_QUAD;
328		break;
329	default:
330		cmd |= QSPI_RD_SNGL;
331		break;
332	}
333	wlen = t->bits_per_word >> 3;	/* in bytes */
334
335	while (count) {
336		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
337		if (qspi_is_busy(qspi))
338			return -EBUSY;
339
340		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
341		if (ti_qspi_poll_wc(qspi)) {
342			dev_err(qspi->dev, "read timed out\n");
343			return -ETIMEDOUT;
344		}
345		switch (wlen) {
346		case 1:
347			*rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
348			break;
349		case 2:
350			*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
351			break;
352		case 4:
353			*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
354			break;
355		}
356		rxbuf += wlen;
357		count -= wlen;
358	}
359
360	return 0;
361}
362
363static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
364			     int count)
365{
366	int ret;
367
368	if (t->tx_buf) {
369		ret = qspi_write_msg(qspi, t, count);
370		if (ret) {
371			dev_dbg(qspi->dev, "Error while writing\n");
372			return ret;
373		}
374	}
375
376	if (t->rx_buf) {
377		ret = qspi_read_msg(qspi, t, count);
378		if (ret) {
379			dev_dbg(qspi->dev, "Error while reading\n");
380			return ret;
381		}
382	}
383
384	return 0;
385}
386
387static void ti_qspi_dma_callback(void *param)
388{
389	struct ti_qspi *qspi = param;
390
391	complete(&qspi->transfer_complete);
392}
393
394static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
395			    dma_addr_t dma_src, size_t len)
396{
397	struct dma_chan *chan = qspi->rx_chan;
398	dma_cookie_t cookie;
399	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
400	struct dma_async_tx_descriptor *tx;
401	int ret;
402
403	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
404	if (!tx) {
405		dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
406		return -EIO;
407	}
408
409	tx->callback = ti_qspi_dma_callback;
410	tx->callback_param = qspi;
411	cookie = tx->tx_submit(tx);
412	reinit_completion(&qspi->transfer_complete);
413
414	ret = dma_submit_error(cookie);
415	if (ret) {
416		dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
417		return -EIO;
418	}
419
420	dma_async_issue_pending(chan);
421	ret = wait_for_completion_timeout(&qspi->transfer_complete,
422					  msecs_to_jiffies(len));
423	if (ret <= 0) {
424		dmaengine_terminate_sync(chan);
425		dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
426		return -ETIMEDOUT;
427	}
428
429	return 0;
430}
431
432static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
433				     void *to, size_t readsize)
434{
435	dma_addr_t dma_src = qspi->mmap_phys_base + offs;
436	int ret = 0;
437
438	/*
 439	 * Use a bounce buffer, as filesystems like jffs2 and ubifs may
 440	 * pass buffers that do not belong to the kernel lowmem region.
441	 */
442	while (readsize != 0) {
443		size_t xfer_len = min_t(size_t, QSPI_DMA_BUFFER_SIZE,
444					readsize);
445
446		ret = ti_qspi_dma_xfer(qspi, qspi->rx_bb_dma_addr,
447				       dma_src, xfer_len);
448		if (ret != 0)
449			return ret;
450		memcpy(to, qspi->rx_bb_addr, xfer_len);
451		readsize -= xfer_len;
452		dma_src += xfer_len;
453		to += xfer_len;
454	}
455
456	return ret;
457}
458
459static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
460			       loff_t from)
461{
462	struct scatterlist *sg;
463	dma_addr_t dma_src = qspi->mmap_phys_base + from;
464	dma_addr_t dma_dst;
465	int i, len, ret;
466
467	for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
468		dma_dst = sg_dma_address(sg);
469		len = sg_dma_len(sg);
470		ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
471		if (ret)
472			return ret;
473		dma_src += len;
474	}
475
476	return 0;
477}
478
479static void ti_qspi_enable_memory_map(struct spi_device *spi)
480{
481	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
482
483	ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
484	if (qspi->ctrl_base) {
485		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
486				   MEM_CS_MASK,
487				   MEM_CS_EN(spi->chip_select));
488	}
489	qspi->mmap_enabled = true;
490}
491
492static void ti_qspi_disable_memory_map(struct spi_device *spi)
493{
494	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
495
496	ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
497	if (qspi->ctrl_base)
498		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
499				   MEM_CS_MASK, 0);
500	qspi->mmap_enabled = false;
501}
502
503static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
504				    u8 data_nbits, u8 addr_width,
505				    u8 dummy_bytes)
506{
507	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
508	u32 memval = opcode;
509
510	switch (data_nbits) {
511	case SPI_NBITS_QUAD:
512		memval |= QSPI_SETUP_RD_QUAD;
513		break;
514	case SPI_NBITS_DUAL:
515		memval |= QSPI_SETUP_RD_DUAL;
516		break;
517	default:
518		memval |= QSPI_SETUP_RD_NORMAL;
519		break;
520	}
521	memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
522		   dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
523	ti_qspi_write(qspi, memval,
524		      QSPI_SPI_SETUP_REG(spi->chip_select));
525}
526
527static int ti_qspi_exec_mem_op(struct spi_mem *mem,
528			       const struct spi_mem_op *op)
529{
530	struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
531	u32 from = 0;
532	int ret = 0;
533
534	/* Only optimize read path. */
535	if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
536	    !op->addr.nbytes || op->addr.nbytes > 4)
537		return -ENOTSUPP;
538
539	/* Address exceeds MMIO window size, fall back to regular mode. */
540	from = op->addr.val;
541	if (from + op->data.nbytes > qspi->mmap_size)
542		return -ENOTSUPP;
543
544	mutex_lock(&qspi->list_lock);
545
546	if (!qspi->mmap_enabled)
547		ti_qspi_enable_memory_map(mem->spi);
548	ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
549				op->addr.nbytes, op->dummy.nbytes);
550
551	if (qspi->rx_chan) {
552		struct sg_table sgt;
553
554		if (virt_addr_valid(op->data.buf.in) &&
555		    !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
556							&sgt)) {
557			ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
558			spi_controller_dma_unmap_mem_op_data(mem->spi->master,
559							     op, &sgt);
560		} else {
561			ret = ti_qspi_dma_bounce_buffer(qspi, from,
562							op->data.buf.in,
563							op->data.nbytes);
564		}
565	} else {
566		memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
567			      op->data.nbytes);
568	}
569
570	mutex_unlock(&qspi->list_lock);
571
572	return ret;
573}
574
575static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
576	.exec_op = ti_qspi_exec_mem_op,
577};
578
579static int ti_qspi_start_transfer_one(struct spi_master *master,
580		struct spi_message *m)
581{
582	struct ti_qspi *qspi = spi_master_get_devdata(master);
583	struct spi_device *spi = m->spi;
584	struct spi_transfer *t;
585	int status = 0, ret;
586	unsigned int frame_len_words, transfer_len_words;
587	int wlen;
588
589	/* setup device control reg */
590	qspi->dc = 0;
591
592	if (spi->mode & SPI_CPHA)
593		qspi->dc |= QSPI_CKPHA(spi->chip_select);
594	if (spi->mode & SPI_CPOL)
595		qspi->dc |= QSPI_CKPOL(spi->chip_select);
596	if (spi->mode & SPI_CS_HIGH)
597		qspi->dc |= QSPI_CSPOL(spi->chip_select);
598
599	frame_len_words = 0;
600	list_for_each_entry(t, &m->transfers, transfer_list)
601		frame_len_words += t->len / (t->bits_per_word >> 3);
602	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
603
604	/* setup command reg */
605	qspi->cmd = 0;
606	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
607	qspi->cmd |= QSPI_FLEN(frame_len_words);
608
609	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
610
611	mutex_lock(&qspi->list_lock);
612
613	if (qspi->mmap_enabled)
614		ti_qspi_disable_memory_map(spi);
615
616	list_for_each_entry(t, &m->transfers, transfer_list) {
617		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
618			     QSPI_WLEN(t->bits_per_word));
619
620		wlen = t->bits_per_word >> 3;
621		transfer_len_words = min(t->len / wlen, frame_len_words);
622
623		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
624		if (ret) {
625			dev_dbg(qspi->dev, "transfer message failed\n");
626			mutex_unlock(&qspi->list_lock);
627			return -EINVAL;
628		}
629
630		m->actual_length += transfer_len_words * wlen;
631		frame_len_words -= transfer_len_words;
632		if (frame_len_words == 0)
633			break;
634	}
635
636	mutex_unlock(&qspi->list_lock);
637
638	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
639	m->status = status;
640	spi_finalize_current_message(master);
641
642	return status;
643}
644
645static int ti_qspi_runtime_resume(struct device *dev)
646{
647	struct ti_qspi      *qspi;
648
649	qspi = dev_get_drvdata(dev);
650	ti_qspi_restore_ctx(qspi);
651
652	return 0;
653}
654
655static const struct of_device_id ti_qspi_match[] = {
656	{.compatible = "ti,dra7xxx-qspi" },
657	{.compatible = "ti,am4372-qspi" },
658	{},
659};
660MODULE_DEVICE_TABLE(of, ti_qspi_match);
661
662static int ti_qspi_probe(struct platform_device *pdev)
663{
664	struct  ti_qspi *qspi;
665	struct spi_master *master;
666	struct resource         *r, *res_mmap;
667	struct device_node *np = pdev->dev.of_node;
668	u32 max_freq;
669	int ret = 0, num_cs, irq;
670	dma_cap_mask_t mask;
671
672	master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
673	if (!master)
674		return -ENOMEM;
675
676	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
677
678	master->flags = SPI_MASTER_HALF_DUPLEX;
679	master->setup = ti_qspi_setup;
680	master->auto_runtime_pm = true;
681	master->transfer_one_message = ti_qspi_start_transfer_one;
682	master->dev.of_node = pdev->dev.of_node;
683	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
684				     SPI_BPW_MASK(8);
685	master->mem_ops = &ti_qspi_mem_ops;
686
687	if (!of_property_read_u32(np, "num-cs", &num_cs))
688		master->num_chipselect = num_cs;
689
690	qspi = spi_master_get_devdata(master);
691	qspi->master = master;
692	qspi->dev = &pdev->dev;
693	platform_set_drvdata(pdev, qspi);
694
695	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
696	if (r == NULL) {
697		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
698		if (r == NULL) {
699			dev_err(&pdev->dev, "missing platform data\n");
700			ret = -ENODEV;
701			goto free_master;
702		}
703	}
704
705	res_mmap = platform_get_resource_byname(pdev,
706			IORESOURCE_MEM, "qspi_mmap");
707	if (res_mmap == NULL) {
708		res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
709		if (res_mmap == NULL) {
710			dev_err(&pdev->dev,
711				"memory mapped resource not required\n");
712		}
713	}
714
715	if (res_mmap)
716		qspi->mmap_size = resource_size(res_mmap);
717
718	irq = platform_get_irq(pdev, 0);
719	if (irq < 0) {
720		ret = irq;
721		goto free_master;
722	}
723
724	mutex_init(&qspi->list_lock);
725
726	qspi->base = devm_ioremap_resource(&pdev->dev, r);
727	if (IS_ERR(qspi->base)) {
728		ret = PTR_ERR(qspi->base);
729		goto free_master;
730	}
731
732
733	if (of_property_read_bool(np, "syscon-chipselects")) {
734		qspi->ctrl_base =
735		syscon_regmap_lookup_by_phandle(np,
736						"syscon-chipselects");
737		if (IS_ERR(qspi->ctrl_base)) {
738			ret = PTR_ERR(qspi->ctrl_base);
739			goto free_master;
740		}
741		ret = of_property_read_u32_index(np,
742						 "syscon-chipselects",
743						 1, &qspi->ctrl_reg);
744		if (ret) {
745			dev_err(&pdev->dev,
746				"couldn't get ctrl_mod reg index\n");
747			goto free_master;
748		}
749	}
750
751	qspi->fclk = devm_clk_get(&pdev->dev, "fck");
752	if (IS_ERR(qspi->fclk)) {
753		ret = PTR_ERR(qspi->fclk);
754		dev_err(&pdev->dev, "could not get clk: %d\n", ret);
755	}
756
757	pm_runtime_use_autosuspend(&pdev->dev);
758	pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
759	pm_runtime_enable(&pdev->dev);
760
761	if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
762		qspi->spi_max_frequency = max_freq;
763
764	dma_cap_zero(mask);
765	dma_cap_set(DMA_MEMCPY, mask);
766
767	qspi->rx_chan = dma_request_chan_by_mask(&mask);
768	if (IS_ERR(qspi->rx_chan)) {
769		dev_err(qspi->dev,
770			"No Rx DMA available, trying mmap mode\n");
771		qspi->rx_chan = NULL;
772		ret = 0;
773		goto no_dma;
774	}
775	qspi->rx_bb_addr = dma_alloc_coherent(qspi->dev,
776					      QSPI_DMA_BUFFER_SIZE,
777					      &qspi->rx_bb_dma_addr,
778					      GFP_KERNEL | GFP_DMA);
779	if (!qspi->rx_bb_addr) {
780		dev_err(qspi->dev,
781			"dma_alloc_coherent failed, using PIO mode\n");
782		dma_release_channel(qspi->rx_chan);
783		goto no_dma;
784	}
785	master->dma_rx = qspi->rx_chan;
786	init_completion(&qspi->transfer_complete);
787	if (res_mmap)
788		qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;
789
790no_dma:
791	if (!qspi->rx_chan && res_mmap) {
792		qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
793		if (IS_ERR(qspi->mmap_base)) {
794			dev_info(&pdev->dev,
795				 "mmap failed with error %ld using PIO mode\n",
796				 PTR_ERR(qspi->mmap_base));
797			qspi->mmap_base = NULL;
798			master->mem_ops = NULL;
799		}
800	}
801	qspi->mmap_enabled = false;
802
803	ret = devm_spi_register_master(&pdev->dev, master);
804	if (!ret)
805		return 0;
806
807	pm_runtime_disable(&pdev->dev);
808free_master:
809	spi_master_put(master);
810	return ret;
811}
812
813static int ti_qspi_remove(struct platform_device *pdev)
814{
815	struct ti_qspi *qspi = platform_get_drvdata(pdev);
816	int rc;
817
818	rc = spi_master_suspend(qspi->master);
819	if (rc)
820		return rc;
821
822	pm_runtime_put_sync(&pdev->dev);
823	pm_runtime_disable(&pdev->dev);
824
825	if (qspi->rx_bb_addr)
826		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
827				  qspi->rx_bb_addr,
828				  qspi->rx_bb_dma_addr);
829	if (qspi->rx_chan)
830		dma_release_channel(qspi->rx_chan);
831
832	return 0;
833}
834
835static const struct dev_pm_ops ti_qspi_pm_ops = {
836	.runtime_resume = ti_qspi_runtime_resume,
837};
838
839static struct platform_driver ti_qspi_driver = {
840	.probe	= ti_qspi_probe,
841	.remove = ti_qspi_remove,
842	.driver = {
843		.name	= "ti-qspi",
844		.pm =   &ti_qspi_pm_ops,
845		.of_match_table = ti_qspi_match,
846	}
847};
848
849module_platform_driver(ti_qspi_driver);
850
851MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
852MODULE_LICENSE("GPL v2");
853MODULE_DESCRIPTION("TI QSPI controller driver");
854MODULE_ALIAS("platform:ti-qspi");