v4.6
  1/*
  2 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
  3 * Author: Addy Ke <addy.ke@rock-chips.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms and conditions of the GNU General Public License,
  7 * version 2, as published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 */
 15
 16#include <linux/clk.h>
 17#include <linux/dmaengine.h>
 18#include <linux/module.h>
 19#include <linux/of.h>
 20#include <linux/platform_device.h>
 21#include <linux/spi/spi.h>
 22#include <linux/pm_runtime.h>
 23#include <linux/scatterlist.h>
 24
 25#define DRIVER_NAME "rockchip-spi"
 26
 27/* SPI register offsets */
 28#define ROCKCHIP_SPI_CTRLR0			0x0000
 29#define ROCKCHIP_SPI_CTRLR1			0x0004
 30#define ROCKCHIP_SPI_SSIENR			0x0008
 31#define ROCKCHIP_SPI_SER			0x000c
 32#define ROCKCHIP_SPI_BAUDR			0x0010
 33#define ROCKCHIP_SPI_TXFTLR			0x0014
 34#define ROCKCHIP_SPI_RXFTLR			0x0018
 35#define ROCKCHIP_SPI_TXFLR			0x001c
 36#define ROCKCHIP_SPI_RXFLR			0x0020
 37#define ROCKCHIP_SPI_SR				0x0024
 38#define ROCKCHIP_SPI_IPR			0x0028
 39#define ROCKCHIP_SPI_IMR			0x002c
 40#define ROCKCHIP_SPI_ISR			0x0030
 41#define ROCKCHIP_SPI_RISR			0x0034
 42#define ROCKCHIP_SPI_ICR			0x0038
 43#define ROCKCHIP_SPI_DMACR			0x003c
 44#define ROCKCHIP_SPI_DMATDLR		0x0040
 45#define ROCKCHIP_SPI_DMARDLR		0x0044
 46#define ROCKCHIP_SPI_TXDR			0x0400
 47#define ROCKCHIP_SPI_RXDR			0x0800
 48
 49/* Bit fields in CTRLR0 */
 50#define CR0_DFS_OFFSET				0
 51
 52#define CR0_CFS_OFFSET				2
 53
 54#define CR0_SCPH_OFFSET				6
 55
 56#define CR0_SCPOL_OFFSET			7
 57
 58#define CR0_CSM_OFFSET				8
 59#define CR0_CSM_KEEP				0x0
 60/* ss_n is held high for half an sclk_out cycle */
 61#define CR0_CSM_HALF				0x1
 62/* ss_n is held high for one sclk_out cycle */
 63#define CR0_CSM_ONE					0x2
 64
 65/* ss_n to sclk_out delay */
 66#define CR0_SSD_OFFSET				10
 67/*
 68 * The period between ss_n active and
 69 * sclk_out active is half an sclk_out cycle
 70 */
 71#define CR0_SSD_HALF				0x0
 72/*
 73 * The period between ss_n active and
 74 * sclk_out active is one sclk_out cycle
 75 */
 76#define CR0_SSD_ONE					0x1
 77
 78#define CR0_EM_OFFSET				11
 79#define CR0_EM_LITTLE				0x0
 80#define CR0_EM_BIG					0x1
 81
 82#define CR0_FBM_OFFSET				12
 83#define CR0_FBM_MSB					0x0
 84#define CR0_FBM_LSB					0x1
 85
 86#define CR0_BHT_OFFSET				13
 87#define CR0_BHT_16BIT				0x0
 88#define CR0_BHT_8BIT				0x1
 89
 90#define CR0_RSD_OFFSET				14
 91
 92#define CR0_FRF_OFFSET				16
 93#define CR0_FRF_SPI					0x0
 94#define CR0_FRF_SSP					0x1
 95#define CR0_FRF_MICROWIRE			0x2
 96
 97#define CR0_XFM_OFFSET				18
 98#define CR0_XFM_MASK				(0x03 << CR0_XFM_OFFSET)
 99#define CR0_XFM_TR					0x0
100#define CR0_XFM_TO					0x1
101#define CR0_XFM_RO					0x2
102
103#define CR0_OPM_OFFSET				20
104#define CR0_OPM_MASTER				0x0
105#define CR0_OPM_SLAVE				0x1
106
107#define CR0_MTM_OFFSET				21
108
109/* Bit fields in SER, 2bit */
110#define SER_MASK					0x3
111
112/* Bit fields in SR, 5bit */
113#define SR_MASK						0x1f
114#define SR_BUSY						(1 << 0)
115#define SR_TF_FULL					(1 << 1)
116#define SR_TF_EMPTY					(1 << 2)
117#define SR_RF_EMPTY					(1 << 3)
118#define SR_RF_FULL					(1 << 4)
119
120/* Bit fields in IMR, ISR, RISR, 5bit */
121#define INT_MASK					0x1f
122#define INT_TF_EMPTY				(1 << 0)
123#define INT_TF_OVERFLOW				(1 << 1)
124#define INT_RF_UNDERFLOW			(1 << 2)
125#define INT_RF_OVERFLOW				(1 << 3)
126#define INT_RF_FULL					(1 << 4)
127
128/* Bit fields in ICR, 4bit */
129#define ICR_MASK					0x0f
130#define ICR_ALL						(1 << 0)
131#define ICR_RF_UNDERFLOW			(1 << 1)
132#define ICR_RF_OVERFLOW				(1 << 2)
133#define ICR_TF_OVERFLOW				(1 << 3)
134
135/* Bit fields in DMACR */
136#define RF_DMA_EN					(1 << 0)
137#define TF_DMA_EN					(1 << 1)
138
139#define RXBUSY						(1 << 0)
140#define TXBUSY						(1 << 1)
141
142/* sclk_out: spi master internal logic in rk3x can support up to 50 MHz */
143#define MAX_SCLK_OUT		50000000
144
145enum rockchip_ssi_type {
146	SSI_MOTO_SPI = 0,
147	SSI_TI_SSP,
148	SSI_NS_MICROWIRE,
149};
150
151struct rockchip_spi_dma_data {
152	struct dma_chan *ch;
153	enum dma_transfer_direction direction;
154	dma_addr_t addr;
155};
156
157struct rockchip_spi {
158	struct device *dev;
159	struct spi_master *master;
160
161	struct clk *spiclk;
162	struct clk *apb_pclk;
163
164	void __iomem *regs;
 165	/* depth of the FIFO buffer */
166	u32 fifo_len;
167	/* max bus freq supported */
168	u32 max_freq;
 169	/* SSI protocol type (Motorola SPI, TI SSP, NS Microwire) */
170	enum rockchip_ssi_type type;
171
172	u16 mode;
173	u8 tmode;
174	u8 bpw;
175	u8 n_bytes;
176	u32 rsd_nsecs;
177	unsigned len;
178	u32 speed;
179
180	const void *tx;
181	const void *tx_end;
182	void *rx;
183	void *rx_end;
184
185	u32 state;
186	/* protect state */
187	spinlock_t lock;
188
189	u32 use_dma;
190	struct sg_table tx_sg;
191	struct sg_table rx_sg;
192	struct rockchip_spi_dma_data dma_rx;
193	struct rockchip_spi_dma_data dma_tx;
194	struct dma_slave_caps dma_caps;
195};
196
197static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
198{
199	writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR);
200}
201
202static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
203{
204	writel_relaxed(div, rs->regs + ROCKCHIP_SPI_BAUDR);
205}
206
207static inline void flush_fifo(struct rockchip_spi *rs)
208{
209	while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
210		readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
211}
212
213static inline void wait_for_idle(struct rockchip_spi *rs)
214{
215	unsigned long timeout = jiffies + msecs_to_jiffies(5);
216
217	do {
218		if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
219			return;
220	} while (!time_after(jiffies, timeout));
221
222	dev_warn(rs->dev, "spi controller is in busy state!\n");
223}
224
225static u32 get_fifo_len(struct rockchip_spi *rs)
226{
227	u32 fifo;
228
229	for (fifo = 2; fifo < 32; fifo++) {
230		writel_relaxed(fifo, rs->regs + ROCKCHIP_SPI_TXFTLR);
231		if (fifo != readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFTLR))
232			break;
233	}
234
235	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_TXFTLR);
236
237	return (fifo == 31) ? 0 : fifo;
238}
239
240static inline u32 tx_max(struct rockchip_spi *rs)
241{
242	u32 tx_left, tx_room;
243
244	tx_left = (rs->tx_end - rs->tx) / rs->n_bytes;
245	tx_room = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
246
247	return min(tx_left, tx_room);
248}
249
250static inline u32 rx_max(struct rockchip_spi *rs)
251{
252	u32 rx_left = (rs->rx_end - rs->rx) / rs->n_bytes;
253	u32 rx_room = (u32)readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
254
255	return min(rx_left, rx_room);
256}
257
258static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
259{
260	u32 ser;
261	struct spi_master *master = spi->master;
262	struct rockchip_spi *rs = spi_master_get_devdata(master);
263
264	pm_runtime_get_sync(rs->dev);
265
266	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
267
268	/*
269	 * drivers/spi/spi.c:
270	 * static void spi_set_cs(struct spi_device *spi, bool enable)
271	 * {
272	 *		if (spi->mode & SPI_CS_HIGH)
273	 *			enable = !enable;
274	 *
275	 *		if (spi->cs_gpio >= 0)
276	 *			gpio_set_value(spi->cs_gpio, !enable);
277	 *		else if (spi->master->set_cs)
278	 *		spi->master->set_cs(spi, !enable);
279	 * }
280	 *
281	 * Note: enable(rockchip_spi_set_cs) = !enable(spi_set_cs)
282	 */
283	if (!enable)
284		ser |= 1 << spi->chip_select;
285	else
286		ser &= ~(1 << spi->chip_select);
287
288	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
289
290	pm_runtime_put_sync(rs->dev);
291}
292
293static int rockchip_spi_prepare_message(struct spi_master *master,
294					struct spi_message *msg)
295{
296	struct rockchip_spi *rs = spi_master_get_devdata(master);
297	struct spi_device *spi = msg->spi;
298
299	rs->mode = spi->mode;
300
301	return 0;
302}
303
304static void rockchip_spi_handle_err(struct spi_master *master,
305				    struct spi_message *msg)
306{
307	unsigned long flags;
308	struct rockchip_spi *rs = spi_master_get_devdata(master);
309
310	spin_lock_irqsave(&rs->lock, flags);
311
312	/*
313	 * For DMA mode, we need to terminate the DMA channel and flush the
314	 * FIFO for the next transfer if the DMA transfer times out.
315	 * handle_err() is called by the core when a transfer fails, so
316	 * this seems a reasonable place for the error handling.
317	 */
318	if (rs->use_dma) {
319		if (rs->state & RXBUSY) {
320			dmaengine_terminate_async(rs->dma_rx.ch);
321			flush_fifo(rs);
322		}
323
324		if (rs->state & TXBUSY)
325			dmaengine_terminate_async(rs->dma_tx.ch);
326	}
327
328	spin_unlock_irqrestore(&rs->lock, flags);
329}
330
331static int rockchip_spi_unprepare_message(struct spi_master *master,
332					  struct spi_message *msg)
333{
334	struct rockchip_spi *rs = spi_master_get_devdata(master);
335
336	spi_enable_chip(rs, 0);
337
338	return 0;
339}
340
341static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
342{
343	u32 max = tx_max(rs);
344	u32 txw = 0;
345
346	while (max--) {
347		if (rs->n_bytes == 1)
348			txw = *(u8 *)(rs->tx);
349		else
350			txw = *(u16 *)(rs->tx);
351
352		writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
353		rs->tx += rs->n_bytes;
354	}
355}
356
357static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
358{
359	u32 max = rx_max(rs);
360	u32 rxw;
361
362	while (max--) {
363		rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
364		if (rs->n_bytes == 1)
365			*(u8 *)(rs->rx) = (u8)rxw;
366		else
367			*(u16 *)(rs->rx) = (u16)rxw;
368		rs->rx += rs->n_bytes;
369	}
370}
371
372static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
373{
374	int remain = 0;
375
376	do {
377		if (rs->tx) {
378			remain = rs->tx_end - rs->tx;
379			rockchip_spi_pio_writer(rs);
380		}
381
382		if (rs->rx) {
383			remain = rs->rx_end - rs->rx;
384			rockchip_spi_pio_reader(rs);
385		}
386
387		cpu_relax();
388	} while (remain);
389
390	/* If tx, wait until the TX FIFO has drained completely. */
391	if (rs->tx)
392		wait_for_idle(rs);
393
394	spi_enable_chip(rs, 0);
395
396	return 0;
397}
398
399static void rockchip_spi_dma_rxcb(void *data)
400{
401	unsigned long flags;
402	struct rockchip_spi *rs = data;
403
404	spin_lock_irqsave(&rs->lock, flags);
405
406	rs->state &= ~RXBUSY;
407	if (!(rs->state & TXBUSY)) {
408		spi_enable_chip(rs, 0);
409		spi_finalize_current_transfer(rs->master);
410	}
411
412	spin_unlock_irqrestore(&rs->lock, flags);
413}
414
415static void rockchip_spi_dma_txcb(void *data)
416{
417	unsigned long flags;
418	struct rockchip_spi *rs = data;
419
420	/* Wait until the TX FIFO has drained completely. */
421	wait_for_idle(rs);
422
423	spin_lock_irqsave(&rs->lock, flags);
424
425	rs->state &= ~TXBUSY;
426	if (!(rs->state & RXBUSY)) {
427		spi_enable_chip(rs, 0);
428		spi_finalize_current_transfer(rs->master);
429	}
430
431	spin_unlock_irqrestore(&rs->lock, flags);
432}
433
434static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
435{
436	unsigned long flags;
437	struct dma_slave_config rxconf, txconf;
438	struct dma_async_tx_descriptor *rxdesc, *txdesc;
439
440	spin_lock_irqsave(&rs->lock, flags);
441	rs->state &= ~RXBUSY;
442	rs->state &= ~TXBUSY;
443	spin_unlock_irqrestore(&rs->lock, flags);
444
445	rxdesc = NULL;
446	if (rs->rx) {
447		rxconf.direction = rs->dma_rx.direction;
448		rxconf.src_addr = rs->dma_rx.addr;
449		rxconf.src_addr_width = rs->n_bytes;
450		if (rs->dma_caps.max_burst > 4)
451			rxconf.src_maxburst = 4;
452		else
453			rxconf.src_maxburst = 1;
454		dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
455
456		rxdesc = dmaengine_prep_slave_sg(
457				rs->dma_rx.ch,
458				rs->rx_sg.sgl, rs->rx_sg.nents,
459				rs->dma_rx.direction, DMA_PREP_INTERRUPT);
460		if (!rxdesc)
461			return -EINVAL;
462
463		rxdesc->callback = rockchip_spi_dma_rxcb;
464		rxdesc->callback_param = rs;
465	}
466
467	txdesc = NULL;
468	if (rs->tx) {
469		txconf.direction = rs->dma_tx.direction;
470		txconf.dst_addr = rs->dma_tx.addr;
471		txconf.dst_addr_width = rs->n_bytes;
472		if (rs->dma_caps.max_burst > 4)
473			txconf.dst_maxburst = 4;
474		else
475			txconf.dst_maxburst = 1;
476		dmaengine_slave_config(rs->dma_tx.ch, &txconf);
477
478		txdesc = dmaengine_prep_slave_sg(
479				rs->dma_tx.ch,
480				rs->tx_sg.sgl, rs->tx_sg.nents,
481				rs->dma_tx.direction, DMA_PREP_INTERRUPT);
482		if (!txdesc) {
483			if (rxdesc)
484				dmaengine_terminate_sync(rs->dma_rx.ch);
485			return -EINVAL;
486		}
487
488		txdesc->callback = rockchip_spi_dma_txcb;
489		txdesc->callback_param = rs;
490	}
491
492	/* rx must be started before tx due to the full-duplex nature of SPI */
493	if (rxdesc) {
494		spin_lock_irqsave(&rs->lock, flags);
495		rs->state |= RXBUSY;
496		spin_unlock_irqrestore(&rs->lock, flags);
497		dmaengine_submit(rxdesc);
498		dma_async_issue_pending(rs->dma_rx.ch);
499	}
500
501	if (txdesc) {
502		spin_lock_irqsave(&rs->lock, flags);
503		rs->state |= TXBUSY;
504		spin_unlock_irqrestore(&rs->lock, flags);
505		dmaengine_submit(txdesc);
506		dma_async_issue_pending(rs->dma_tx.ch);
507	}
508
509	return 0;
510}
511
512static void rockchip_spi_config(struct rockchip_spi *rs)
513{
514	u32 div = 0;
515	u32 dmacr = 0;
516	int rsd = 0;
517
518	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
519		| (CR0_SSD_ONE << CR0_SSD_OFFSET)
520		| (CR0_EM_BIG << CR0_EM_OFFSET);
521
522	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
523	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
524	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
525	cr0 |= (rs->type << CR0_FRF_OFFSET);
526
527	if (rs->use_dma) {
528		if (rs->tx)
529			dmacr |= TF_DMA_EN;
530		if (rs->rx)
531			dmacr |= RF_DMA_EN;
532	}
533
534	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
535		rs->speed = MAX_SCLK_OUT;
536
537	/* the minimum divisor is 2 */
538	if (rs->max_freq < 2 * rs->speed) {
539		clk_set_rate(rs->spiclk, 2 * rs->speed);
540		rs->max_freq = clk_get_rate(rs->spiclk);
541	}
542
543	/* the divider doesn't support odd numbers */
544	div = DIV_ROUND_UP(rs->max_freq, rs->speed);
545	div = (div + 1) & 0xfffe;
546
547	/* Rx sample delay is expressed in parent clock cycles (max 3) */
548	rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
549				1000000000 >> 8);
550	if (!rsd && rs->rsd_nsecs) {
551		pr_warn_once("rockchip-spi: %u Hz is too slow to express %u ns delay\n",
552			     rs->max_freq, rs->rsd_nsecs);
553	} else if (rsd > 3) {
554		rsd = 3;
555		pr_warn_once("rockchip-spi: %u Hz is too fast to express %u ns delay, clamping at %u ns\n",
556			     rs->max_freq, rs->rsd_nsecs,
557			     rsd * 1000000000U / rs->max_freq);
558	}
559	cr0 |= rsd << CR0_RSD_OFFSET;
560
561	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
562
563	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
564	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
565	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
566
567	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
568	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
569	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
570
571	spi_set_clk(rs, div);
572
573	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
574}
575
576static int rockchip_spi_transfer_one(
577		struct spi_master *master,
578		struct spi_device *spi,
579		struct spi_transfer *xfer)
580{
581	int ret = 1;
582	struct rockchip_spi *rs = spi_master_get_devdata(master);
583
584	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
585		(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
586
587	if (!xfer->tx_buf && !xfer->rx_buf) {
588		dev_err(rs->dev, "No buffer for transfer\n");
589		return -EINVAL;
590	}
591
592	rs->speed = xfer->speed_hz;
593	rs->bpw = xfer->bits_per_word;
594	rs->n_bytes = rs->bpw >> 3;
595
596	rs->tx = xfer->tx_buf;
597	rs->tx_end = rs->tx + xfer->len;
598	rs->rx = xfer->rx_buf;
599	rs->rx_end = rs->rx + xfer->len;
600	rs->len = xfer->len;
601
602	rs->tx_sg = xfer->tx_sg;
603	rs->rx_sg = xfer->rx_sg;
604
605	if (rs->tx && rs->rx)
606		rs->tmode = CR0_XFM_TR;
607	else if (rs->tx)
608		rs->tmode = CR0_XFM_TO;
609	else if (rs->rx)
610		rs->tmode = CR0_XFM_RO;
611
612	/* we need to prepare DMA before the SPI controller is enabled */
613	if (master->can_dma && master->can_dma(master, spi, xfer))
614		rs->use_dma = 1;
615	else
616		rs->use_dma = 0;
617
618	rockchip_spi_config(rs);
619
620	if (rs->use_dma) {
621		if (rs->tmode == CR0_XFM_RO) {
622			/* rx: dma must be prepared first */
623			ret = rockchip_spi_prepare_dma(rs);
624			spi_enable_chip(rs, 1);
625		} else {
626			/* tx or tr: spi must be enabled first */
627			spi_enable_chip(rs, 1);
628			ret = rockchip_spi_prepare_dma(rs);
629		}
630	} else {
631		spi_enable_chip(rs, 1);
632		ret = rockchip_spi_pio_transfer(rs);
633	}
634
635	return ret;
636}
637
638static bool rockchip_spi_can_dma(struct spi_master *master,
639				 struct spi_device *spi,
640				 struct spi_transfer *xfer)
641{
642	struct rockchip_spi *rs = spi_master_get_devdata(master);
643
644	return (xfer->len > rs->fifo_len);
645}
646
647static int rockchip_spi_probe(struct platform_device *pdev)
648{
649	int ret = 0;
650	struct rockchip_spi *rs;
651	struct spi_master *master;
652	struct resource *mem;
653	u32 rsd_nsecs;
654
655	master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
656	if (!master)
657		return -ENOMEM;
658
659	platform_set_drvdata(pdev, master);
660
661	rs = spi_master_get_devdata(master);
662
663	/* Get basic io resource and map it */
664	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
665	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
666	if (IS_ERR(rs->regs)) {
667		ret =  PTR_ERR(rs->regs);
668		goto err_ioremap_resource;
669	}
670
671	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
672	if (IS_ERR(rs->apb_pclk)) {
673		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
674		ret = PTR_ERR(rs->apb_pclk);
675		goto err_ioremap_resource;
676	}
677
678	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
679	if (IS_ERR(rs->spiclk)) {
680		dev_err(&pdev->dev, "Failed to get spi_pclk\n");
681		ret = PTR_ERR(rs->spiclk);
682		goto err_ioremap_resource;
683	}
684
685	ret = clk_prepare_enable(rs->apb_pclk);
686	if (ret) {
687		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
688		goto err_ioremap_resource;
689	}
690
691	ret = clk_prepare_enable(rs->spiclk);
692	if (ret) {
693		dev_err(&pdev->dev, "Failed to enable spi_clk\n");
694		goto err_spiclk_enable;
695	}
696
697	spi_enable_chip(rs, 0);
698
699	rs->type = SSI_MOTO_SPI;
700	rs->master = master;
701	rs->dev = &pdev->dev;
702	rs->max_freq = clk_get_rate(rs->spiclk);
703
704	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
705				  &rsd_nsecs))
706		rs->rsd_nsecs = rsd_nsecs;
707
708	rs->fifo_len = get_fifo_len(rs);
709	if (!rs->fifo_len) {
710		dev_err(&pdev->dev, "Failed to get fifo length\n");
711		ret = -EINVAL;
712		goto err_get_fifo_len;
713	}
714
715	spin_lock_init(&rs->lock);
716
717	pm_runtime_set_active(&pdev->dev);
718	pm_runtime_enable(&pdev->dev);
719
720	master->auto_runtime_pm = true;
721	master->bus_num = pdev->id;
722	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
723	master->num_chipselect = 2;
724	master->dev.of_node = pdev->dev.of_node;
725	master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
726
727	master->set_cs = rockchip_spi_set_cs;
728	master->prepare_message = rockchip_spi_prepare_message;
729	master->unprepare_message = rockchip_spi_unprepare_message;
730	master->transfer_one = rockchip_spi_transfer_one;
731	master->handle_err = rockchip_spi_handle_err;
732
733	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
734	if (IS_ERR(rs->dma_tx.ch)) {
735		/* Check tx to see if we need to defer probing the driver */
736		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
737			ret = -EPROBE_DEFER;
738			goto err_get_fifo_len;
739		}
740		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
741		rs->dma_tx.ch = NULL;
742	}
743
744	rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
745	if (IS_ERR(rs->dma_rx.ch)) {
746		if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
747			dma_release_channel(rs->dma_tx.ch);
748			rs->dma_tx.ch = NULL;
749			ret = -EPROBE_DEFER;
750			goto err_get_fifo_len;
751		}
752		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
753		rs->dma_rx.ch = NULL;
754	}
755
756	if (rs->dma_tx.ch && rs->dma_rx.ch) {
757		dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
758		rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
759		rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
760		rs->dma_tx.direction = DMA_MEM_TO_DEV;
761		rs->dma_rx.direction = DMA_DEV_TO_MEM;
762
763		master->can_dma = rockchip_spi_can_dma;
764		master->dma_tx = rs->dma_tx.ch;
765		master->dma_rx = rs->dma_rx.ch;
766	}
767
768	ret = devm_spi_register_master(&pdev->dev, master);
769	if (ret) {
770		dev_err(&pdev->dev, "Failed to register master\n");
771		goto err_register_master;
772	}
773
774	return 0;
775
776err_register_master:
777	pm_runtime_disable(&pdev->dev);
778	if (rs->dma_tx.ch)
779		dma_release_channel(rs->dma_tx.ch);
780	if (rs->dma_rx.ch)
781		dma_release_channel(rs->dma_rx.ch);
782err_get_fifo_len:
783	clk_disable_unprepare(rs->spiclk);
784err_spiclk_enable:
785	clk_disable_unprepare(rs->apb_pclk);
786err_ioremap_resource:
787	spi_master_put(master);
788
789	return ret;
790}
791
792static int rockchip_spi_remove(struct platform_device *pdev)
793{
794	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
795	struct rockchip_spi *rs = spi_master_get_devdata(master);
796
797	pm_runtime_disable(&pdev->dev);
798
799	clk_disable_unprepare(rs->spiclk);
800	clk_disable_unprepare(rs->apb_pclk);
801
802	if (rs->dma_tx.ch)
803		dma_release_channel(rs->dma_tx.ch);
804	if (rs->dma_rx.ch)
805		dma_release_channel(rs->dma_rx.ch);
806
807	spi_master_put(master);
808
809	return 0;
810}
811
812#ifdef CONFIG_PM_SLEEP
813static int rockchip_spi_suspend(struct device *dev)
814{
815	int ret = 0;
816	struct spi_master *master = dev_get_drvdata(dev);
817	struct rockchip_spi *rs = spi_master_get_devdata(master);
818
819	ret = spi_master_suspend(rs->master);
820	if (ret)
821		return ret;
822
823	if (!pm_runtime_suspended(dev)) {
824		clk_disable_unprepare(rs->spiclk);
825		clk_disable_unprepare(rs->apb_pclk);
826	}
827
828	return ret;
829}
830
831static int rockchip_spi_resume(struct device *dev)
832{
833	int ret = 0;
834	struct spi_master *master = dev_get_drvdata(dev);
835	struct rockchip_spi *rs = spi_master_get_devdata(master);
836
837	if (!pm_runtime_suspended(dev)) {
838		ret = clk_prepare_enable(rs->apb_pclk);
839		if (ret < 0)
840			return ret;
841
842		ret = clk_prepare_enable(rs->spiclk);
843		if (ret < 0) {
844			clk_disable_unprepare(rs->apb_pclk);
845			return ret;
846		}
847	}
848
849	ret = spi_master_resume(rs->master);
850	if (ret < 0) {
851		clk_disable_unprepare(rs->spiclk);
852		clk_disable_unprepare(rs->apb_pclk);
853	}
854
855	return ret;
856}
857#endif /* CONFIG_PM_SLEEP */
858
859#ifdef CONFIG_PM
860static int rockchip_spi_runtime_suspend(struct device *dev)
861{
862	struct spi_master *master = dev_get_drvdata(dev);
863	struct rockchip_spi *rs = spi_master_get_devdata(master);
864
865	clk_disable_unprepare(rs->spiclk);
866	clk_disable_unprepare(rs->apb_pclk);
867
868	return 0;
869}
870
871static int rockchip_spi_runtime_resume(struct device *dev)
872{
873	int ret;
874	struct spi_master *master = dev_get_drvdata(dev);
875	struct rockchip_spi *rs = spi_master_get_devdata(master);
876
877	ret = clk_prepare_enable(rs->apb_pclk);
878	if (ret)
879		return ret;
880
881	ret = clk_prepare_enable(rs->spiclk);
882	if (ret)
883		clk_disable_unprepare(rs->apb_pclk);
884
885	return ret;
886}
887#endif /* CONFIG_PM */
888
889static const struct dev_pm_ops rockchip_spi_pm = {
890	SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
891	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
892			   rockchip_spi_runtime_resume, NULL)
893};
894
895static const struct of_device_id rockchip_spi_dt_match[] = {
896	{ .compatible = "rockchip,rk3066-spi", },
897	{ .compatible = "rockchip,rk3188-spi", },
898	{ .compatible = "rockchip,rk3288-spi", },
899	{ .compatible = "rockchip,rk3399-spi", },
900	{ },
901};
902MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
903
904static struct platform_driver rockchip_spi_driver = {
905	.driver = {
906		.name	= DRIVER_NAME,
907		.pm = &rockchip_spi_pm,
908		.of_match_table = of_match_ptr(rockchip_spi_dt_match),
909	},
910	.probe = rockchip_spi_probe,
911	.remove = rockchip_spi_remove,
912};
913
914module_platform_driver(rockchip_spi_driver);
915
916MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
917MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
918MODULE_LICENSE("GPL v2");
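
To make the clock setup in rockchip_spi_config() above concrete, here is a small worked example. The numbers (a 99 MHz spiclk parent, a 20 MHz transfer and a 30 ns rx-sample-delay-ns property) are illustrative assumptions, not values taken from the source; the snippet mirrors the driver's arithmetic in plain C, using 64-bit math instead of the driver's >>8 scaling trick.

/* Standalone sketch of the divider and rx-sample-delay math in
 * rockchip_spi_config(); all input values below are assumed. */
#include <stdio.h>

int main(void)
{
	unsigned int max_freq = 99000000;	/* assumed spiclk rate (Hz) */
	unsigned int speed = 20000000;		/* assumed xfer->speed_hz */
	unsigned int rsd_nsecs = 30;		/* assumed rx-sample-delay-ns */

	/* div = DIV_ROUND_UP(max_freq, speed), then rounded up to even */
	unsigned int div = (max_freq + speed - 1) / speed;	/* 5 */
	div = (div + 1) & 0xfffe;		/* 6 -> sclk_out = 16.5 MHz */

	/* rsd ~= DIV_ROUND_CLOSEST(rsd_nsecs * max_freq, 1e9), clamped to 3;
	 * the driver shifts both operands right by 8 to avoid 32-bit overflow,
	 * 64-bit arithmetic is used here instead for clarity. */
	unsigned long long num = (unsigned long long)rsd_nsecs * max_freq;
	unsigned int rsd = (unsigned int)((num + 500000000ULL) / 1000000000ULL);	/* 3 */
	if (rsd > 3)
		rsd = 3;

	printf("sclk_out = %u Hz, rsd = %u cycles\n", max_freq / div, rsd);
	return 0;
}
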
v4.17
  1/*
  2 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
  3 * Author: Addy Ke <addy.ke@rock-chips.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms and conditions of the GNU General Public License,
  7 * version 2, as published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 */
 15
 16#include <linux/clk.h>
 17#include <linux/dmaengine.h>
 18#include <linux/module.h>
 19#include <linux/of.h>
 20#include <linux/pinctrl/consumer.h>
 21#include <linux/platform_device.h>
 22#include <linux/spi/spi.h>
 23#include <linux/pm_runtime.h>
 24#include <linux/scatterlist.h>
 25
 26#define DRIVER_NAME "rockchip-spi"
 27
 28#define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
 29		writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
 30#define ROCKCHIP_SPI_SET_BITS(reg, bits) \
 31		writel_relaxed(readl_relaxed(reg) | (bits), reg)
 32
 33/* SPI register offsets */
 34#define ROCKCHIP_SPI_CTRLR0			0x0000
 35#define ROCKCHIP_SPI_CTRLR1			0x0004
 36#define ROCKCHIP_SPI_SSIENR			0x0008
 37#define ROCKCHIP_SPI_SER			0x000c
 38#define ROCKCHIP_SPI_BAUDR			0x0010
 39#define ROCKCHIP_SPI_TXFTLR			0x0014
 40#define ROCKCHIP_SPI_RXFTLR			0x0018
 41#define ROCKCHIP_SPI_TXFLR			0x001c
 42#define ROCKCHIP_SPI_RXFLR			0x0020
 43#define ROCKCHIP_SPI_SR				0x0024
 44#define ROCKCHIP_SPI_IPR			0x0028
 45#define ROCKCHIP_SPI_IMR			0x002c
 46#define ROCKCHIP_SPI_ISR			0x0030
 47#define ROCKCHIP_SPI_RISR			0x0034
 48#define ROCKCHIP_SPI_ICR			0x0038
 49#define ROCKCHIP_SPI_DMACR			0x003c
 50#define ROCKCHIP_SPI_DMATDLR		0x0040
 51#define ROCKCHIP_SPI_DMARDLR		0x0044
 52#define ROCKCHIP_SPI_TXDR			0x0400
 53#define ROCKCHIP_SPI_RXDR			0x0800
 54
 55/* Bit fields in CTRLR0 */
 56#define CR0_DFS_OFFSET				0
 57
 58#define CR0_CFS_OFFSET				2
 59
 60#define CR0_SCPH_OFFSET				6
 61
 62#define CR0_SCPOL_OFFSET			7
 63
 64#define CR0_CSM_OFFSET				8
 65#define CR0_CSM_KEEP				0x0
 66/* ss_n is held high for half an sclk_out cycle */
 67#define CR0_CSM_HALF				0x1
 68/* ss_n is held high for one sclk_out cycle */
 69#define CR0_CSM_ONE					0x2
 70
 71/* ss_n to sclk_out delay */
 72#define CR0_SSD_OFFSET				10
 73/*
 74 * The period between ss_n active and
 75 * sclk_out active is half an sclk_out cycle
 76 */
 77#define CR0_SSD_HALF				0x0
 78/*
 79 * The period between ss_n active and
 80 * sclk_out active is one sclk_out cycle
 81 */
 82#define CR0_SSD_ONE					0x1
 83
 84#define CR0_EM_OFFSET				11
 85#define CR0_EM_LITTLE				0x0
 86#define CR0_EM_BIG					0x1
 87
 88#define CR0_FBM_OFFSET				12
 89#define CR0_FBM_MSB					0x0
 90#define CR0_FBM_LSB					0x1
 91
 92#define CR0_BHT_OFFSET				13
 93#define CR0_BHT_16BIT				0x0
 94#define CR0_BHT_8BIT				0x1
 95
 96#define CR0_RSD_OFFSET				14
 97
 98#define CR0_FRF_OFFSET				16
 99#define CR0_FRF_SPI					0x0
100#define CR0_FRF_SSP					0x1
101#define CR0_FRF_MICROWIRE			0x2
102
103#define CR0_XFM_OFFSET				18
104#define CR0_XFM_MASK				(0x03 << CR0_XFM_OFFSET)
105#define CR0_XFM_TR					0x0
106#define CR0_XFM_TO					0x1
107#define CR0_XFM_RO					0x2
108
109#define CR0_OPM_OFFSET				20
110#define CR0_OPM_MASTER				0x0
111#define CR0_OPM_SLAVE				0x1
112
113#define CR0_MTM_OFFSET				21
114
115/* Bit fields in SER, 2bit */
116#define SER_MASK					0x3
117
118/* Bit fields in SR, 5bit */
119#define SR_MASK						0x1f
120#define SR_BUSY						(1 << 0)
121#define SR_TF_FULL					(1 << 1)
122#define SR_TF_EMPTY					(1 << 2)
123#define SR_RF_EMPTY					(1 << 3)
124#define SR_RF_FULL					(1 << 4)
125
126/* Bit fields in IMR, ISR, RISR, 5bit */
127#define INT_MASK					0x1f
128#define INT_TF_EMPTY				(1 << 0)
129#define INT_TF_OVERFLOW				(1 << 1)
130#define INT_RF_UNDERFLOW			(1 << 2)
131#define INT_RF_OVERFLOW				(1 << 3)
132#define INT_RF_FULL					(1 << 4)
133
134/* Bit fields in ICR, 4bit */
135#define ICR_MASK					0x0f
136#define ICR_ALL						(1 << 0)
137#define ICR_RF_UNDERFLOW			(1 << 1)
138#define ICR_RF_OVERFLOW				(1 << 2)
139#define ICR_TF_OVERFLOW				(1 << 3)
140
141/* Bit fields in DMACR */
142#define RF_DMA_EN					(1 << 0)
143#define TF_DMA_EN					(1 << 1)
144
145#define RXBUSY						(1 << 0)
146#define TXBUSY						(1 << 1)
147
148/* sclk_out: spi master internal logic in rk3x can support up to 50 MHz */
149#define MAX_SCLK_OUT		50000000
150
151/*
152 * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
153 * the controller seems to hang when given 0x10000, so stick with this for now.
154 */
155#define ROCKCHIP_SPI_MAX_TRANLEN		0xffff
156
157#define ROCKCHIP_SPI_MAX_CS_NUM			2
158
159enum rockchip_ssi_type {
160	SSI_MOTO_SPI = 0,
161	SSI_TI_SSP,
162	SSI_NS_MICROWIRE,
163};
164
165struct rockchip_spi_dma_data {
166	struct dma_chan *ch;
167	enum dma_transfer_direction direction;
168	dma_addr_t addr;
169};
170
171struct rockchip_spi {
172	struct device *dev;
173	struct spi_master *master;
174
175	struct clk *spiclk;
176	struct clk *apb_pclk;
177
178	void __iomem *regs;
179	/* depth of the FIFO buffer */
180	u32 fifo_len;
181	/* max bus freq supported */
182	u32 max_freq;
183	/* SSI protocol type (Motorola SPI, TI SSP, NS Microwire) */
184	enum rockchip_ssi_type type;
185
186	u16 mode;
187	u8 tmode;
188	u8 bpw;
189	u8 n_bytes;
190	u32 rsd_nsecs;
191	unsigned len;
192	u32 speed;
193
194	const void *tx;
195	const void *tx_end;
196	void *rx;
197	void *rx_end;
198
199	u32 state;
200	/* protect state */
201	spinlock_t lock;
202
203	bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
204
205	u32 use_dma;
206	struct sg_table tx_sg;
207	struct sg_table rx_sg;
208	struct rockchip_spi_dma_data dma_rx;
209	struct rockchip_spi_dma_data dma_tx;
210	struct dma_slave_caps dma_caps;
211};
212
213static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
214{
215	writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR);
216}
217
218static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
219{
220	writel_relaxed(div, rs->regs + ROCKCHIP_SPI_BAUDR);
221}
222
223static inline void flush_fifo(struct rockchip_spi *rs)
224{
225	while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
226		readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
227}
228
229static inline void wait_for_idle(struct rockchip_spi *rs)
230{
231	unsigned long timeout = jiffies + msecs_to_jiffies(5);
232
233	do {
234		if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
235			return;
236	} while (!time_after(jiffies, timeout));
237
238	dev_warn(rs->dev, "spi controller is in busy state!\n");
239}
240
241static u32 get_fifo_len(struct rockchip_spi *rs)
242{
243	u32 fifo;
244
245	for (fifo = 2; fifo < 32; fifo++) {
246		writel_relaxed(fifo, rs->regs + ROCKCHIP_SPI_TXFTLR);
247		if (fifo != readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFTLR))
248			break;
249	}
250
251	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_TXFTLR);
252
253	return (fifo == 31) ? 0 : fifo;
254}
255
256static inline u32 tx_max(struct rockchip_spi *rs)
257{
258	u32 tx_left, tx_room;
259
260	tx_left = (rs->tx_end - rs->tx) / rs->n_bytes;
261	tx_room = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
262
263	return min(tx_left, tx_room);
264}
265
266static inline u32 rx_max(struct rockchip_spi *rs)
267{
268	u32 rx_left = (rs->rx_end - rs->rx) / rs->n_bytes;
269	u32 rx_room = (u32)readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
270
271	return min(rx_left, rx_room);
272}
273
274static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
275{
276	struct spi_master *master = spi->master;
277	struct rockchip_spi *rs = spi_master_get_devdata(master);
278	bool cs_asserted = !enable;
279
280	/* Return immediately for no-op */
281	if (cs_asserted == rs->cs_asserted[spi->chip_select])
282		return;
283
284	if (cs_asserted) {
285		/* Keep things powered as long as CS is asserted */
286		pm_runtime_get_sync(rs->dev);
287
288		ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
289				      BIT(spi->chip_select));
290	} else {
291		ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
292				      BIT(spi->chip_select));
293
294		/* Drop reference from when we first asserted CS */
295		pm_runtime_put(rs->dev);
296	}
297
298	rs->cs_asserted[spi->chip_select] = cs_asserted;
299}
300
301static int rockchip_spi_prepare_message(struct spi_master *master,
302					struct spi_message *msg)
303{
304	struct rockchip_spi *rs = spi_master_get_devdata(master);
305	struct spi_device *spi = msg->spi;
306
307	rs->mode = spi->mode;
308
309	return 0;
310}
311
312static void rockchip_spi_handle_err(struct spi_master *master,
313				    struct spi_message *msg)
314{
315	unsigned long flags;
316	struct rockchip_spi *rs = spi_master_get_devdata(master);
317
318	spin_lock_irqsave(&rs->lock, flags);
319
320	/*
321	 * For DMA mode, we need to terminate the DMA channel and flush the
322	 * FIFO for the next transfer if the DMA transfer times out.
323	 * handle_err() is called by the core when a transfer fails, so
324	 * this seems a reasonable place for the error handling.
325	 */
326	if (rs->use_dma) {
327		if (rs->state & RXBUSY) {
328			dmaengine_terminate_async(rs->dma_rx.ch);
329			flush_fifo(rs);
330		}
331
332		if (rs->state & TXBUSY)
333			dmaengine_terminate_async(rs->dma_tx.ch);
334	}
335
336	spin_unlock_irqrestore(&rs->lock, flags);
337}
338
339static int rockchip_spi_unprepare_message(struct spi_master *master,
340					  struct spi_message *msg)
341{
342	struct rockchip_spi *rs = spi_master_get_devdata(master);
343
344	spi_enable_chip(rs, 0);
345
346	return 0;
347}
348
349static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
350{
351	u32 max = tx_max(rs);
352	u32 txw = 0;
353
354	while (max--) {
355		if (rs->n_bytes == 1)
356			txw = *(u8 *)(rs->tx);
357		else
358			txw = *(u16 *)(rs->tx);
359
360		writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
361		rs->tx += rs->n_bytes;
362	}
363}
364
365static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
366{
367	u32 max = rx_max(rs);
368	u32 rxw;
369
370	while (max--) {
371		rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
372		if (rs->n_bytes == 1)
373			*(u8 *)(rs->rx) = (u8)rxw;
374		else
375			*(u16 *)(rs->rx) = (u16)rxw;
376		rs->rx += rs->n_bytes;
377	}
378}
379
380static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
381{
382	int remain = 0;
383
384	do {
385		if (rs->tx) {
386			remain = rs->tx_end - rs->tx;
387			rockchip_spi_pio_writer(rs);
388		}
389
390		if (rs->rx) {
391			remain = rs->rx_end - rs->rx;
392			rockchip_spi_pio_reader(rs);
393		}
394
395		cpu_relax();
396	} while (remain);
397
398	/* If tx, wait until the TX FIFO has drained completely. */
399	if (rs->tx)
400		wait_for_idle(rs);
401
402	spi_enable_chip(rs, 0);
403
404	return 0;
405}
406
407static void rockchip_spi_dma_rxcb(void *data)
408{
409	unsigned long flags;
410	struct rockchip_spi *rs = data;
411
412	spin_lock_irqsave(&rs->lock, flags);
413
414	rs->state &= ~RXBUSY;
415	if (!(rs->state & TXBUSY)) {
416		spi_enable_chip(rs, 0);
417		spi_finalize_current_transfer(rs->master);
418	}
419
420	spin_unlock_irqrestore(&rs->lock, flags);
421}
422
423static void rockchip_spi_dma_txcb(void *data)
424{
425	unsigned long flags;
426	struct rockchip_spi *rs = data;
427
428	/* Wait until the TX FIFO has drained completely. */
429	wait_for_idle(rs);
430
431	spin_lock_irqsave(&rs->lock, flags);
432
433	rs->state &= ~TXBUSY;
434	if (!(rs->state & RXBUSY)) {
435		spi_enable_chip(rs, 0);
436		spi_finalize_current_transfer(rs->master);
437	}
438
439	spin_unlock_irqrestore(&rs->lock, flags);
440}
441
442static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
443{
444	unsigned long flags;
445	struct dma_slave_config rxconf, txconf;
446	struct dma_async_tx_descriptor *rxdesc, *txdesc;
447
448	spin_lock_irqsave(&rs->lock, flags);
449	rs->state &= ~RXBUSY;
450	rs->state &= ~TXBUSY;
451	spin_unlock_irqrestore(&rs->lock, flags);
452
453	rxdesc = NULL;
454	if (rs->rx) {
455		rxconf.direction = rs->dma_rx.direction;
456		rxconf.src_addr = rs->dma_rx.addr;
457		rxconf.src_addr_width = rs->n_bytes;
458		if (rs->dma_caps.max_burst > 4)
459			rxconf.src_maxburst = 4;
460		else
461			rxconf.src_maxburst = 1;
462		dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
463
464		rxdesc = dmaengine_prep_slave_sg(
465				rs->dma_rx.ch,
466				rs->rx_sg.sgl, rs->rx_sg.nents,
467				rs->dma_rx.direction, DMA_PREP_INTERRUPT);
468		if (!rxdesc)
469			return -EINVAL;
470
471		rxdesc->callback = rockchip_spi_dma_rxcb;
472		rxdesc->callback_param = rs;
473	}
474
475	txdesc = NULL;
476	if (rs->tx) {
477		txconf.direction = rs->dma_tx.direction;
478		txconf.dst_addr = rs->dma_tx.addr;
479		txconf.dst_addr_width = rs->n_bytes;
480		if (rs->dma_caps.max_burst > 4)
481			txconf.dst_maxburst = 4;
482		else
483			txconf.dst_maxburst = 1;
484		dmaengine_slave_config(rs->dma_tx.ch, &txconf);
485
486		txdesc = dmaengine_prep_slave_sg(
487				rs->dma_tx.ch,
488				rs->tx_sg.sgl, rs->tx_sg.nents,
489				rs->dma_tx.direction, DMA_PREP_INTERRUPT);
490		if (!txdesc) {
491			if (rxdesc)
492				dmaengine_terminate_sync(rs->dma_rx.ch);
493			return -EINVAL;
494		}
495
496		txdesc->callback = rockchip_spi_dma_txcb;
497		txdesc->callback_param = rs;
498	}
499
500	/* rx must be started before tx due to the full-duplex nature of SPI */
501	if (rxdesc) {
502		spin_lock_irqsave(&rs->lock, flags);
503		rs->state |= RXBUSY;
504		spin_unlock_irqrestore(&rs->lock, flags);
505		dmaengine_submit(rxdesc);
506		dma_async_issue_pending(rs->dma_rx.ch);
507	}
508
509	if (txdesc) {
510		spin_lock_irqsave(&rs->lock, flags);
511		rs->state |= TXBUSY;
512		spin_unlock_irqrestore(&rs->lock, flags);
513		dmaengine_submit(txdesc);
514		dma_async_issue_pending(rs->dma_tx.ch);
515	}
516
517	return 0;
518}
519
520static void rockchip_spi_config(struct rockchip_spi *rs)
521{
522	u32 div = 0;
523	u32 dmacr = 0;
524	int rsd = 0;
525
526	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
527		| (CR0_SSD_ONE << CR0_SSD_OFFSET)
528		| (CR0_EM_BIG << CR0_EM_OFFSET);
529
530	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
531	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
532	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
533	cr0 |= (rs->type << CR0_FRF_OFFSET);
534
535	if (rs->use_dma) {
536		if (rs->tx)
537			dmacr |= TF_DMA_EN;
538		if (rs->rx)
539			dmacr |= RF_DMA_EN;
540	}
541
542	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
543		rs->speed = MAX_SCLK_OUT;
544
545	/* the minimum divisor is 2 */
546	if (rs->max_freq < 2 * rs->speed) {
547		clk_set_rate(rs->spiclk, 2 * rs->speed);
548		rs->max_freq = clk_get_rate(rs->spiclk);
549	}
550
551	/* the divider doesn't support odd numbers */
552	div = DIV_ROUND_UP(rs->max_freq, rs->speed);
553	div = (div + 1) & 0xfffe;
554
555	/* Rx sample delay is expressed in parent clock cycles (max 3) */
556	rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
557				1000000000 >> 8);
558	if (!rsd && rs->rsd_nsecs) {
559		pr_warn_once("rockchip-spi: %u Hz is too slow to express %u ns delay\n",
560			     rs->max_freq, rs->rsd_nsecs);
561	} else if (rsd > 3) {
562		rsd = 3;
563		pr_warn_once("rockchip-spi: %u Hz is too fast to express %u ns delay, clamping at %u ns\n",
564			     rs->max_freq, rs->rsd_nsecs,
565			     rsd * 1000000000U / rs->max_freq);
566	}
567	cr0 |= rsd << CR0_RSD_OFFSET;
568
569	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
570
571	if (rs->n_bytes == 1)
572		writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
573	else if (rs->n_bytes == 2)
574		writel_relaxed((rs->len / 2) - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
575	else
576		writel_relaxed((rs->len * 2) - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
577
578	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
579	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
580
581	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
582	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
583	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
584
585	spi_set_clk(rs, div);
586
587	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
588}
589
590static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
591{
592	return ROCKCHIP_SPI_MAX_TRANLEN;
593}
594
595static int rockchip_spi_transfer_one(
596		struct spi_master *master,
597		struct spi_device *spi,
598		struct spi_transfer *xfer)
599{
600	int ret = 0;
601	struct rockchip_spi *rs = spi_master_get_devdata(master);
602
603	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
604		(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
605
606	if (!xfer->tx_buf && !xfer->rx_buf) {
607		dev_err(rs->dev, "No buffer for transfer\n");
608		return -EINVAL;
609	}
610
611	if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
612		dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
613		return -EINVAL;
614	}
615
616	rs->speed = xfer->speed_hz;
617	rs->bpw = xfer->bits_per_word;
618	rs->n_bytes = rs->bpw >> 3;
619
620	rs->tx = xfer->tx_buf;
621	rs->tx_end = rs->tx + xfer->len;
622	rs->rx = xfer->rx_buf;
623	rs->rx_end = rs->rx + xfer->len;
624	rs->len = xfer->len;
625
626	rs->tx_sg = xfer->tx_sg;
627	rs->rx_sg = xfer->rx_sg;
628
629	if (rs->tx && rs->rx)
630		rs->tmode = CR0_XFM_TR;
631	else if (rs->tx)
632		rs->tmode = CR0_XFM_TO;
633	else if (rs->rx)
634		rs->tmode = CR0_XFM_RO;
635
636	/* we need to prepare DMA before the SPI controller is enabled */
637	if (master->can_dma && master->can_dma(master, spi, xfer))
638		rs->use_dma = 1;
639	else
640		rs->use_dma = 0;
641
642	rockchip_spi_config(rs);
643
644	if (rs->use_dma) {
645		if (rs->tmode == CR0_XFM_RO) {
646			/* rx: dma must be prepared first */
647			ret = rockchip_spi_prepare_dma(rs);
648			spi_enable_chip(rs, 1);
649		} else {
650			/* tx or tr: spi must be enabled first */
651			spi_enable_chip(rs, 1);
652			ret = rockchip_spi_prepare_dma(rs);
653		}
654		/* successful DMA prepare means the transfer is in progress */
655		ret = ret ? ret : 1;
656	} else {
657		spi_enable_chip(rs, 1);
658		ret = rockchip_spi_pio_transfer(rs);
659	}
660
661	return ret;
662}
663
664static bool rockchip_spi_can_dma(struct spi_master *master,
665				 struct spi_device *spi,
666				 struct spi_transfer *xfer)
667{
668	struct rockchip_spi *rs = spi_master_get_devdata(master);
669
670	return (xfer->len > rs->fifo_len);
671}
672
673static int rockchip_spi_probe(struct platform_device *pdev)
674{
675	int ret;
676	struct rockchip_spi *rs;
677	struct spi_master *master;
678	struct resource *mem;
679	u32 rsd_nsecs;
680
681	master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
682	if (!master)
683		return -ENOMEM;
684
685	platform_set_drvdata(pdev, master);
686
687	rs = spi_master_get_devdata(master);
688
689	/* Get basic io resource and map it */
690	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
691	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
692	if (IS_ERR(rs->regs)) {
693		ret =  PTR_ERR(rs->regs);
694		goto err_put_master;
695	}
696
697	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
698	if (IS_ERR(rs->apb_pclk)) {
699		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
700		ret = PTR_ERR(rs->apb_pclk);
701		goto err_put_master;
702	}
703
704	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
705	if (IS_ERR(rs->spiclk)) {
706		dev_err(&pdev->dev, "Failed to get spi_pclk\n");
707		ret = PTR_ERR(rs->spiclk);
708		goto err_put_master;
709	}
710
711	ret = clk_prepare_enable(rs->apb_pclk);
712	if (ret < 0) {
713		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
714		goto err_put_master;
715	}
716
717	ret = clk_prepare_enable(rs->spiclk);
718	if (ret < 0) {
719		dev_err(&pdev->dev, "Failed to enable spi_clk\n");
720		goto err_disable_apbclk;
721	}
722
723	spi_enable_chip(rs, 0);
724
725	rs->type = SSI_MOTO_SPI;
726	rs->master = master;
727	rs->dev = &pdev->dev;
728	rs->max_freq = clk_get_rate(rs->spiclk);
729
730	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
731				  &rsd_nsecs))
732		rs->rsd_nsecs = rsd_nsecs;
733
734	rs->fifo_len = get_fifo_len(rs);
735	if (!rs->fifo_len) {
736		dev_err(&pdev->dev, "Failed to get fifo length\n");
737		ret = -EINVAL;
738		goto err_disable_spiclk;
739	}
740
741	spin_lock_init(&rs->lock);
742
743	pm_runtime_set_active(&pdev->dev);
744	pm_runtime_enable(&pdev->dev);
745
746	master->auto_runtime_pm = true;
747	master->bus_num = pdev->id;
748	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
749	master->num_chipselect = ROCKCHIP_SPI_MAX_CS_NUM;
750	master->dev.of_node = pdev->dev.of_node;
751	master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
752
753	master->set_cs = rockchip_spi_set_cs;
754	master->prepare_message = rockchip_spi_prepare_message;
755	master->unprepare_message = rockchip_spi_unprepare_message;
756	master->transfer_one = rockchip_spi_transfer_one;
757	master->max_transfer_size = rockchip_spi_max_transfer_size;
758	master->handle_err = rockchip_spi_handle_err;
759	master->flags = SPI_MASTER_GPIO_SS;
760
761	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
762	if (IS_ERR(rs->dma_tx.ch)) {
763		/* Check tx to see if we need to defer probing the driver */
764		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
765			ret = -EPROBE_DEFER;
766			goto err_disable_pm_runtime;
767		}
768		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
769		rs->dma_tx.ch = NULL;
770	}
771
772	rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
773	if (IS_ERR(rs->dma_rx.ch)) {
774		if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
775			ret = -EPROBE_DEFER;
776			goto err_free_dma_tx;
777		}
778		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
779		rs->dma_rx.ch = NULL;
780	}
781
782	if (rs->dma_tx.ch && rs->dma_rx.ch) {
783		dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
784		rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
785		rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
786		rs->dma_tx.direction = DMA_MEM_TO_DEV;
787		rs->dma_rx.direction = DMA_DEV_TO_MEM;
788
789		master->can_dma = rockchip_spi_can_dma;
790		master->dma_tx = rs->dma_tx.ch;
791		master->dma_rx = rs->dma_rx.ch;
792	}
793
794	ret = devm_spi_register_master(&pdev->dev, master);
795	if (ret < 0) {
796		dev_err(&pdev->dev, "Failed to register master\n");
797		goto err_free_dma_rx;
798	}
799
800	return 0;
801
802err_free_dma_rx:
803	if (rs->dma_rx.ch)
804		dma_release_channel(rs->dma_rx.ch);
805err_free_dma_tx:
806	if (rs->dma_tx.ch)
807		dma_release_channel(rs->dma_tx.ch);
808err_disable_pm_runtime:
809	pm_runtime_disable(&pdev->dev);
810err_disable_spiclk:
811	clk_disable_unprepare(rs->spiclk);
812err_disable_apbclk:
813	clk_disable_unprepare(rs->apb_pclk);
814err_put_master:
815	spi_master_put(master);
816
817	return ret;
818}
819
820static int rockchip_spi_remove(struct platform_device *pdev)
821{
822	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
823	struct rockchip_spi *rs = spi_master_get_devdata(master);
824
825	pm_runtime_get_sync(&pdev->dev);
826
827	clk_disable_unprepare(rs->spiclk);
828	clk_disable_unprepare(rs->apb_pclk);
829
830	pm_runtime_put_noidle(&pdev->dev);
831	pm_runtime_disable(&pdev->dev);
832	pm_runtime_set_suspended(&pdev->dev);
833
834	if (rs->dma_tx.ch)
835		dma_release_channel(rs->dma_tx.ch);
836	if (rs->dma_rx.ch)
837		dma_release_channel(rs->dma_rx.ch);
838
839	spi_master_put(master);
840
841	return 0;
842}
843
844#ifdef CONFIG_PM_SLEEP
845static int rockchip_spi_suspend(struct device *dev)
846{
847	int ret;
848	struct spi_master *master = dev_get_drvdata(dev);
849	struct rockchip_spi *rs = spi_master_get_devdata(master);
850
851	ret = spi_master_suspend(rs->master);
852	if (ret < 0)
853		return ret;
854
855	ret = pm_runtime_force_suspend(dev);
856	if (ret < 0)
857		return ret;
858
859	pinctrl_pm_select_sleep_state(dev);
860
861	return 0;
862}
863
864static int rockchip_spi_resume(struct device *dev)
865{
866	int ret;
867	struct spi_master *master = dev_get_drvdata(dev);
868	struct rockchip_spi *rs = spi_master_get_devdata(master);
869
870	pinctrl_pm_select_default_state(dev);
871
872	ret = pm_runtime_force_resume(dev);
873	if (ret < 0)
874		return ret;
875
876	ret = spi_master_resume(rs->master);
877	if (ret < 0) {
878		clk_disable_unprepare(rs->spiclk);
879		clk_disable_unprepare(rs->apb_pclk);
880	}
881
882	return 0;
883}
884#endif /* CONFIG_PM_SLEEP */
885
886#ifdef CONFIG_PM
887static int rockchip_spi_runtime_suspend(struct device *dev)
888{
889	struct spi_master *master = dev_get_drvdata(dev);
890	struct rockchip_spi *rs = spi_master_get_devdata(master);
891
892	clk_disable_unprepare(rs->spiclk);
893	clk_disable_unprepare(rs->apb_pclk);
894
895	return 0;
896}
897
898static int rockchip_spi_runtime_resume(struct device *dev)
899{
900	int ret;
901	struct spi_master *master = dev_get_drvdata(dev);
902	struct rockchip_spi *rs = spi_master_get_devdata(master);
903
904	ret = clk_prepare_enable(rs->apb_pclk);
905	if (ret < 0)
906		return ret;
907
908	ret = clk_prepare_enable(rs->spiclk);
909	if (ret < 0)
910		clk_disable_unprepare(rs->apb_pclk);
911
912	return 0;
913}
914#endif /* CONFIG_PM */
915
916static const struct dev_pm_ops rockchip_spi_pm = {
917	SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
918	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
919			   rockchip_spi_runtime_resume, NULL)
920};
921
922static const struct of_device_id rockchip_spi_dt_match[] = {
923	{ .compatible = "rockchip,rv1108-spi", },
924	{ .compatible = "rockchip,rk3036-spi", },
925	{ .compatible = "rockchip,rk3066-spi", },
926	{ .compatible = "rockchip,rk3188-spi", },
927	{ .compatible = "rockchip,rk3228-spi", },
928	{ .compatible = "rockchip,rk3288-spi", },
929	{ .compatible = "rockchip,rk3368-spi", },
930	{ .compatible = "rockchip,rk3399-spi", },
931	{ },
932};
933MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
934
935static struct platform_driver rockchip_spi_driver = {
936	.driver = {
937		.name	= DRIVER_NAME,
938		.pm = &rockchip_spi_pm,
939		.of_match_table = of_match_ptr(rockchip_spi_dt_match),
940	},
941	.probe = rockchip_spi_probe,
942	.remove = rockchip_spi_remove,
943};
944
945module_platform_driver(rockchip_spi_driver);
946
947MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
948MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
949MODULE_LICENSE("GPL v2");
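
For context, a minimal sketch of how an SPI client would exercise this controller through the SPI core. The helper name and the write-then-read pattern are assumptions for illustration, not part of the driver: spi_sync_transfer() wraps the transfers in a spi_message, the core asserts chip select via rockchip_spi_set_cs() and then calls rockchip_spi_transfer_one() once per transfer, choosing DMA or PIO via rockchip_spi_can_dma().

#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* Hypothetical client helper: write a command buffer, then read a reply.
 * Buffers are expected to be DMA-safe (e.g. kmalloc'd), since the SPI core
 * may map them for the DMA path used when a transfer exceeds the FIFO. */
static int example_spi_cmd_read(struct spi_device *spi,
				const void *cmd, size_t cmd_len,
				void *reply, size_t reply_len)
{
	struct spi_transfer xfers[] = {
		{ .tx_buf = cmd,   .len = cmd_len },	/* TX-only leg (CR0_XFM_TO) */
		{ .rx_buf = reply, .len = reply_len },	/* RX-only leg (CR0_XFM_RO) */
	};

	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}
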