// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SPI bus driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/reset.h>

#define DRIVER_NAME "sirfsoc_spi"
/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE BIT(16)
#define SIRFSOC_SPI_CMD_MODE BIT(17)
#define SIRFSOC_SPI_CS_IO_OUT BIT(18)
#define SIRFSOC_SPI_CS_IO_MODE BIT(19)
#define SIRFSOC_SPI_CLK_IDLE_STAT BIT(20)
#define SIRFSOC_SPI_CS_IDLE_STAT BIT(21)
#define SIRFSOC_SPI_TRAN_MSB BIT(22)
#define SIRFSOC_SPI_DRV_POS_EDGE BIT(23)
#define SIRFSOC_SPI_CS_HOLD_TIME BIT(24)
#define SIRFSOC_SPI_CLK_SAMPLE_MODE BIT(25)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8 (0 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)

/* Interrupt Enable */
#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_INT_EN BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)

/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE BIT(0)
#define SIRFSOC_SPI_TX_DONE BIT(1)
#define SIRFSOC_SPI_RX_OFLOW BIT(2)
#define SIRFSOC_SPI_TX_UFLOW BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA BIT(4)
#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_REACH BIT(9)
#define SIRFSOC_SPI_FRM_END BIT(10)

/* TX RX enable */
#define SIRFSOC_SPI_RX_EN BIT(0)
#define SIRFSOC_SPI_TX_EN BIT(1)
#define SIRFSOC_SPI_CMD_TX_EN BIT(2)

#define SIRFSOC_SPI_IO_MODE_SEL BIT(0)
#define SIRFSOC_SPI_RX_DMA_FLUSH BIT(2)

/* FIFO OPs */
#define SIRFSOC_SPI_FIFO_RESET BIT(0)
#define SIRFSOC_SPI_FIFO_START BIT(1)

/* FIFO CTRL */
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
/* USP related */
#define SIRFSOC_USP_SYNC_MODE BIT(0)
#define SIRFSOC_USP_SLV_MODE BIT(1)
#define SIRFSOC_USP_LSB BIT(4)
#define SIRFSOC_USP_EN BIT(5)
#define SIRFSOC_USP_RXD_FALLING_EDGE BIT(6)
#define SIRFSOC_USP_TXD_FALLING_EDGE BIT(7)
#define SIRFSOC_USP_CS_HIGH_VALID BIT(9)
#define SIRFSOC_USP_SCLK_IDLE_STAT BIT(11)
#define SIRFSOC_USP_TFS_IO_MODE BIT(14)
#define SIRFSOC_USP_TFS_IO_INPUT BIT(19)

#define SIRFSOC_USP_RXD_DELAY_LEN_MASK 0xFF
#define SIRFSOC_USP_TXD_DELAY_LEN_MASK 0xFF
#define SIRFSOC_USP_RXD_DELAY_OFFSET 0
#define SIRFSOC_USP_TXD_DELAY_OFFSET 8
#define SIRFSOC_USP_RXD_DELAY_LEN 1
#define SIRFSOC_USP_TXD_DELAY_LEN 1
#define SIRFSOC_USP_CLK_DIVISOR_OFFSET 21
#define SIRFSOC_USP_CLK_DIVISOR_MASK 0x3FF
#define SIRFSOC_USP_CLK_10_11_MASK 0x3
#define SIRFSOC_USP_CLK_10_11_OFFSET 30
#define SIRFSOC_USP_CLK_12_15_MASK 0xF
#define SIRFSOC_USP_CLK_12_15_OFFSET 24

#define SIRFSOC_USP_TX_DATA_OFFSET 0
#define SIRFSOC_USP_TX_SYNC_OFFSET 8
#define SIRFSOC_USP_TX_FRAME_OFFSET 16
#define SIRFSOC_USP_TX_SHIFTER_OFFSET 24

#define SIRFSOC_USP_TX_DATA_MASK 0xFF
#define SIRFSOC_USP_TX_SYNC_MASK 0xFF
#define SIRFSOC_USP_TX_FRAME_MASK 0xFF
#define SIRFSOC_USP_TX_SHIFTER_MASK 0x1F

#define SIRFSOC_USP_RX_DATA_OFFSET 0
#define SIRFSOC_USP_RX_FRAME_OFFSET 8
#define SIRFSOC_USP_RX_SHIFTER_OFFSET 16

#define SIRFSOC_USP_RX_DATA_MASK 0xFF
#define SIRFSOC_USP_RX_FRAME_MASK 0xFF
#define SIRFSOC_USP_RX_SHIFTER_MASK 0x1F
#define SIRFSOC_USP_CS_HIGH_VALUE BIT(1)

#define SIRFSOC_SPI_FIFO_SC_OFFSET 0
#define SIRFSOC_SPI_FIFO_LC_OFFSET 10
#define SIRFSOC_SPI_FIFO_HC_OFFSET 20

#define SIRFSOC_SPI_FIFO_FULL_MASK(s) (1 << ((s)->fifo_full_offset))
#define SIRFSOC_SPI_FIFO_EMPTY_MASK(s) (1 << ((s)->fifo_full_offset + 1))
#define SIRFSOC_SPI_FIFO_THD_MASK(s) ((s)->fifo_size - 1)
#define SIRFSOC_SPI_FIFO_THD_OFFSET 2
#define SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(s, val) \
	((val) & (s)->fifo_level_chk_mask)

enum sirf_spi_type {
	SIRF_REAL_SPI,
	SIRF_USP_SPI_P2,
	SIRF_USP_SPI_A7,
};

/*
 * We use DMA only when the rx/tx buffers and the transfer size are
 * 4-byte aligned, due to a limitation of the DMA controller.
 */

#define ALIGNED(x) (!((u32)x & 0x3))
#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
	ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))

#define SIRFSOC_MAX_CMD_BYTES 4
#define SIRFSOC_SPI_DEFAULT_FRQ 1000000

struct sirf_spi_register {
	/* SPI and USP-SPI common */
	u32 tx_rx_en;
	u32 int_en;
	u32 int_st;
	u32 tx_dma_io_ctrl;
	u32 tx_dma_io_len;
	u32 txfifo_ctrl;
	u32 txfifo_level_chk;
	u32 txfifo_op;
	u32 txfifo_st;
	u32 txfifo_data;
	u32 rx_dma_io_ctrl;
	u32 rx_dma_io_len;
	u32 rxfifo_ctrl;
	u32 rxfifo_level_chk;
	u32 rxfifo_op;
	u32 rxfifo_st;
	u32 rxfifo_data;
	/* SPI specific */
	u32 spi_ctrl;
	u32 spi_cmd;
	u32 spi_dummy_delay_ctrl;
	/* USP-SPI specific */
	u32 usp_mode1;
	u32 usp_mode2;
	u32 usp_tx_frame_ctrl;
	u32 usp_rx_frame_ctrl;
	u32 usp_pin_io_data;
	u32 usp_risc_dsp_mode;
	u32 usp_async_param_reg;
	u32 usp_irda_x_mode_div;
	u32 usp_sm_cfg;
	u32 usp_int_en_clr;
};

static const struct sirf_spi_register real_spi_register = {
	.tx_rx_en = 0x8,
	.int_en = 0xc,
	.int_st = 0x10,
	.tx_dma_io_ctrl = 0x100,
	.tx_dma_io_len = 0x104,
	.txfifo_ctrl = 0x108,
	.txfifo_level_chk = 0x10c,
	.txfifo_op = 0x110,
	.txfifo_st = 0x114,
	.txfifo_data = 0x118,
	.rx_dma_io_ctrl = 0x120,
	.rx_dma_io_len = 0x124,
	.rxfifo_ctrl = 0x128,
	.rxfifo_level_chk = 0x12c,
	.rxfifo_op = 0x130,
	.rxfifo_st = 0x134,
	.rxfifo_data = 0x138,
	.spi_ctrl = 0x0,
	.spi_cmd = 0x4,
	.spi_dummy_delay_ctrl = 0x144,
};

static const struct sirf_spi_register usp_spi_register = {
	.tx_rx_en = 0x10,
	.int_en = 0x14,
	.int_st = 0x18,
	.tx_dma_io_ctrl = 0x100,
	.tx_dma_io_len = 0x104,
	.txfifo_ctrl = 0x108,
	.txfifo_level_chk = 0x10c,
	.txfifo_op = 0x110,
	.txfifo_st = 0x114,
	.txfifo_data = 0x118,
	.rx_dma_io_ctrl = 0x120,
	.rx_dma_io_len = 0x124,
	.rxfifo_ctrl = 0x128,
	.rxfifo_level_chk = 0x12c,
	.rxfifo_op = 0x130,
	.rxfifo_st = 0x134,
	.rxfifo_data = 0x138,
	.usp_mode1 = 0x0,
	.usp_mode2 = 0x4,
	.usp_tx_frame_ctrl = 0x8,
	.usp_rx_frame_ctrl = 0xc,
	.usp_pin_io_data = 0x1c,
	.usp_risc_dsp_mode = 0x20,
	.usp_async_param_reg = 0x24,
	.usp_irda_x_mode_div = 0x28,
	.usp_sm_cfg = 0x2c,
	.usp_int_en_clr = 0x140,
};

struct sirfsoc_spi {
	struct spi_bitbang bitbang;
	struct completion rx_done;
	struct completion tx_done;

	void __iomem *base;
	u32 ctrl_freq;  /* SPI controller clock speed */
	struct clk *clk;

	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;

	/* place received word into rx buffer */
	void (*rx_word) (struct sirfsoc_spi *);
	/* get word from tx buffer for sending */
	void (*tx_word) (struct sirfsoc_spi *);

	/* number of words left to be transmitted/received */
	unsigned int left_tx_word;
	unsigned int left_rx_word;

	/* rx & tx DMA channels */
	struct dma_chan *rx_chan;
	struct dma_chan *tx_chan;
	dma_addr_t src_start;
	dma_addr_t dst_start;
	int word_width; /* in bytes */

	/*
	 * if the tx size is no more than 4 bytes and there is no rx buffer,
	 * use the command mode
	 */
	bool tx_by_cmd;
	bool hw_cs;
	enum sirf_spi_type type;
	const struct sirf_spi_register *regs;
	unsigned int fifo_size;
	/* fifo empty offset is (fifo full offset + 1) */
	unsigned int fifo_full_offset;
	/* fifo_level_chk_mask is (fifo_size/4 - 1) */
	unsigned int fifo_level_chk_mask;
	unsigned int dat_max_frm_len;
};

struct sirf_spi_comp_data {
	const struct sirf_spi_register *regs;
	enum sirf_spi_type type;
	unsigned int dat_max_frm_len;
	unsigned int fifo_size;
	void (*hwinit)(struct sirfsoc_spi *sspi);
};

static void sirfsoc_usp_hwinit(struct sirfsoc_spi *sspi)
{
	/* reset the USP block so that it can operate */
	writel(readl(sspi->base + sspi->regs->usp_mode1) &
		~SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
	writel(readl(sspi->base + sspi->regs->usp_mode1) |
		SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
}

static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data;
	u8 *rx = sspi->rx;

	data = readl(sspi->base + sspi->regs->rxfifo_data);

	if (rx) {
		*rx++ = (u8) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u8 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}
	writel(data, sspi->base + sspi->regs->txfifo_data);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data;
	u16 *rx = sspi->rx;

	data = readl(sspi->base + sspi->regs->rxfifo_data);

	if (rx) {
		*rx++ = (u16) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u16 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + sspi->regs->txfifo_data);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data;
	u32 *rx = sspi->rx;

	data = readl(sspi->base + sspi->regs->rxfifo_data);

	if (rx) {
		*rx++ = (u32) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u32 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + sspi->regs->txfifo_data);
	sspi->left_tx_word--;
}

static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
	struct sirfsoc_spi *sspi = dev_id;
	u32 spi_stat;

	spi_stat = readl(sspi->base + sspi->regs->int_st);
	if (sspi->tx_by_cmd && sspi->type == SIRF_REAL_SPI
		&& (spi_stat & SIRFSOC_SPI_FRM_END)) {
		complete(&sspi->tx_done);
		writel(0x0, sspi->base + sspi->regs->int_en);
		writel(readl(sspi->base + sspi->regs->int_st),
			sspi->base + sspi->regs->int_st);
		return IRQ_HANDLED;
	}
	/* Error Conditions */
	if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
		spi_stat & SIRFSOC_SPI_TX_UFLOW) {
		complete(&sspi->tx_done);
		complete(&sspi->rx_done);
		switch (sspi->type) {
		case SIRF_REAL_SPI:
		case SIRF_USP_SPI_P2:
			writel(0x0, sspi->base + sspi->regs->int_en);
			break;
		case SIRF_USP_SPI_A7:
			writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
			break;
		}
		writel(readl(sspi->base + sspi->regs->int_st),
			sspi->base + sspi->regs->int_st);
		return IRQ_HANDLED;
	}
	if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
		complete(&sspi->tx_done);
	while (!(readl(sspi->base + sspi->regs->int_st) &
		SIRFSOC_SPI_RX_IO_DMA))
		cpu_relax();
	complete(&sspi->rx_done);
	switch (sspi->type) {
	case SIRF_REAL_SPI:
	case SIRF_USP_SPI_P2:
		writel(0x0, sspi->base + sspi->regs->int_en);
		break;
	case SIRF_USP_SPI_A7:
		writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
		break;
	}
	writel(readl(sspi->base + sspi->regs->int_st),
		sspi->base + sspi->regs->int_st);

	return IRQ_HANDLED;
}

static void spi_sirfsoc_dma_fini_callback(void *data)
{
	struct completion *dma_complete = data;

	complete(dma_complete);
}

static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;
	u32 cmd;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
	memcpy(&cmd, sspi->tx, t->len);
	if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
		cmd = cpu_to_be32(cmd) >>
			((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
	if (sspi->word_width == 2 && t->len == 4 &&
			(!(spi->mode & SPI_LSB_FIRST)))
		cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
	writel(cmd, sspi->base + sspi->regs->spi_cmd);
	writel(SIRFSOC_SPI_FRM_END_INT_EN,
		sspi->base + sspi->regs->int_en);
	writel(SIRFSOC_SPI_CMD_TX_EN,
		sspi->base + sspi->regs->tx_rx_en);
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "cmd transfer timeout\n");
		return;
	}
	sspi->left_rx_word -= t->len;
}

static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;
	int timeout = t->len * 10;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	switch (sspi->type) {
	case SIRF_REAL_SPI:
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->txfifo_op);
		writel(0, sspi->base + sspi->regs->int_en);
		break;
	case SIRF_USP_SPI_P2:
		writel(0x0, sspi->base + sspi->regs->rxfifo_op);
		writel(0x0, sspi->base + sspi->regs->txfifo_op);
		writel(0, sspi->base + sspi->regs->int_en);
		break;
	case SIRF_USP_SPI_A7:
		writel(0x0, sspi->base + sspi->regs->rxfifo_op);
		writel(0x0, sspi->base + sspi->regs->txfifo_op);
		writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
		break;
	}
	writel(readl(sspi->base + sspi->regs->int_st),
		sspi->base + sspi->regs->int_st);
	if (sspi->left_tx_word < sspi->dat_max_frm_len) {
		switch (sspi->type) {
		case SIRF_REAL_SPI:
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				SIRFSOC_SPI_ENA_AUTO_CLR |
				SIRFSOC_SPI_MUL_DAT_MODE,
				sspi->base + sspi->regs->spi_ctrl);
			writel(sspi->left_tx_word - 1,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(sspi->left_tx_word - 1,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_USP_SPI_P2:
		case SIRF_USP_SPI_A7:
			/* USP simulates SPI; tx/rx_dma_io_len is in bytes */
			writel(sspi->left_tx_word * sspi->word_width,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(sspi->left_tx_word * sspi->word_width,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		}
	} else {
		if (sspi->type == SIRF_REAL_SPI)
			writel(readl(sspi->base + sspi->regs->spi_ctrl),
				sspi->base + sspi->regs->spi_ctrl);
		writel(0, sspi->base + sspi->regs->tx_dma_io_len);
		writel(0, sspi->base + sspi->regs->rx_dma_io_len);
	}
	sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
	rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
		sspi->dst_start, t->len, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	rx_desc->callback = spi_sirfsoc_dma_fini_callback;
	rx_desc->callback_param = &sspi->rx_done;

	sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
		sspi->src_start, t->len, DMA_MEM_TO_DEV,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	tx_desc->callback = spi_sirfsoc_dma_fini_callback;
	tx_desc->callback_param = &sspi->tx_done;

	dmaengine_submit(tx_desc);
	dmaengine_submit(rx_desc);
	dma_async_issue_pending(sspi->tx_chan);
	dma_async_issue_pending(sspi->rx_chan);
	writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
		sspi->base + sspi->regs->tx_rx_en);
	if (sspi->type == SIRF_USP_SPI_P2 ||
		sspi->type == SIRF_USP_SPI_A7) {
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + sspi->regs->txfifo_op);
	}
	if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->rx_chan);
	} else
		sspi->left_rx_word = 0;
	/*
	 * We only wait for the tx-done event when transferring by DMA; for
	 * PIO, rx data is produced by writing tx data, so once rx is done,
	 * tx has finished even earlier.
	 */
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		if (sspi->type == SIRF_USP_SPI_P2 ||
			sspi->type == SIRF_USP_SPI_A7)
			writel(0, sspi->base + sspi->regs->tx_rx_en);
		dmaengine_terminate_all(sspi->tx_chan);
	}
	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	/* TX, RX FIFO stop */
	writel(0, sspi->base + sspi->regs->rxfifo_op);
	writel(0, sspi->base + sspi->regs->txfifo_op);
	if (sspi->left_tx_word >= sspi->dat_max_frm_len)
		writel(0, sspi->base + sspi->regs->tx_rx_en);
	if (sspi->type == SIRF_USP_SPI_P2 ||
		sspi->type == SIRF_USP_SPI_A7)
		writel(0, sspi->base + sspi->regs->tx_rx_en);
}

static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;
	unsigned int data_units;

	sspi = spi_master_get_devdata(spi->master);
	do {
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + sspi->regs->txfifo_op);
		switch (sspi->type) {
		case SIRF_USP_SPI_P2:
			writel(0x0, sspi->base + sspi->regs->rxfifo_op);
			writel(0x0, sspi->base + sspi->regs->txfifo_op);
			writel(0, sspi->base + sspi->regs->int_en);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(min((sspi->left_tx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min((sspi->left_rx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_USP_SPI_A7:
			writel(0x0, sspi->base + sspi->regs->rxfifo_op);
			writel(0x0, sspi->base + sspi->regs->txfifo_op);
			writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(min((sspi->left_tx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min((sspi->left_rx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_REAL_SPI:
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->rxfifo_op);
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->txfifo_op);
			writel(0, sspi->base + sspi->regs->int_en);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				SIRFSOC_SPI_MUL_DAT_MODE |
				SIRFSOC_SPI_ENA_AUTO_CLR,
				sspi->base + sspi->regs->spi_ctrl);
			data_units = sspi->fifo_size / sspi->word_width;
			writel(min(sspi->left_tx_word, data_units) - 1,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min(sspi->left_rx_word, data_units) - 1,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		}
		while (!((readl(sspi->base + sspi->regs->txfifo_st)
			& SIRFSOC_SPI_FIFO_FULL_MASK(sspi))) &&
			sspi->left_tx_word)
			sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
			SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RX_OFLOW_INT_EN |
			SIRFSOC_SPI_RX_IO_DMA_INT_EN,
			sspi->base + sspi->regs->int_en);
		writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + sspi->regs->tx_rx_en);
		if (sspi->type == SIRF_USP_SPI_P2 ||
			sspi->type == SIRF_USP_SPI_A7) {
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->rxfifo_op);
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->txfifo_op);
		}
		if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
			!wait_for_completion_timeout(&sspi->rx_done, timeout)) {
			dev_err(&spi->dev, "transfer timeout\n");
			if (sspi->type == SIRF_USP_SPI_P2 ||
				sspi->type == SIRF_USP_SPI_A7)
				writel(0, sspi->base + sspi->regs->tx_rx_en);
			break;
		}
		while (!((readl(sspi->base + sspi->regs->rxfifo_st)
			& SIRFSOC_SPI_FIFO_EMPTY_MASK(sspi))) &&
			sspi->left_rx_word)
			sspi->rx_word(sspi);
		if (sspi->type == SIRF_USP_SPI_P2 ||
			sspi->type == SIRF_USP_SPI_A7)
			writel(0, sspi->base + sspi->regs->tx_rx_en);
		writel(0, sspi->base + sspi->regs->rxfifo_op);
		writel(0, sspi->base + sspi->regs->txfifo_op);
	} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}

static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;

	sspi = spi_master_get_devdata(spi->master);
	sspi->tx = t->tx_buf;
	sspi->rx = t->rx_buf;
	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
	reinit_completion(&sspi->rx_done);
	reinit_completion(&sspi->tx_done);
	/*
	 * If the data goes out through the command register and there is no
	 * rx buffer, just write the command data into the command register
	 * and wait for completion.
	 */
	if (sspi->type == SIRF_REAL_SPI && sspi->tx_by_cmd)
		spi_sirfsoc_cmd_transfer(spi, t);
	else if (IS_DMA_VALID(t))
		spi_sirfsoc_dma_transfer(spi, t);
	else
		spi_sirfsoc_pio_transfer(spi, t);

	return t->len - sspi->left_rx_word * sspi->word_width;
}

static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
{
	struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);

	if (sspi->hw_cs) {
		u32 regval;

		switch (sspi->type) {
		case SIRF_REAL_SPI:
			regval = readl(sspi->base + sspi->regs->spi_ctrl);
			switch (value) {
			case BITBANG_CS_ACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval |= SIRFSOC_SPI_CS_IO_OUT;
				else
					regval &= ~SIRFSOC_SPI_CS_IO_OUT;
				break;
			case BITBANG_CS_INACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval &= ~SIRFSOC_SPI_CS_IO_OUT;
				else
					regval |= SIRFSOC_SPI_CS_IO_OUT;
				break;
			}
			writel(regval, sspi->base + sspi->regs->spi_ctrl);
			break;
		case SIRF_USP_SPI_P2:
		case SIRF_USP_SPI_A7:
			regval = readl(sspi->base +
					sspi->regs->usp_pin_io_data);
			switch (value) {
			case BITBANG_CS_ACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval |= SIRFSOC_USP_CS_HIGH_VALUE;
				else
					regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
				break;
			case BITBANG_CS_INACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
				else
					regval |= SIRFSOC_USP_CS_HIGH_VALUE;
				break;
			}
			writel(regval,
				sspi->base + sspi->regs->usp_pin_io_data);
			break;
		}
	} else {
		switch (value) {
		case BITBANG_CS_ACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 1 : 0);
			break;
		case BITBANG_CS_INACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
			break;
		}
	}
}

static int spi_sirfsoc_config_mode(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;
	u32 regval, usp_mode1;

	sspi = spi_master_get_devdata(spi->master);
	regval = readl(sspi->base + sspi->regs->spi_ctrl);
	usp_mode1 = readl(sspi->base + sspi->regs->usp_mode1);
	if (!(spi->mode & SPI_CS_HIGH)) {
		regval |= SIRFSOC_SPI_CS_IDLE_STAT;
		usp_mode1 &= ~SIRFSOC_USP_CS_HIGH_VALID;
	} else {
		regval &= ~SIRFSOC_SPI_CS_IDLE_STAT;
		usp_mode1 |= SIRFSOC_USP_CS_HIGH_VALID;
	}
	if (!(spi->mode & SPI_LSB_FIRST)) {
		regval |= SIRFSOC_SPI_TRAN_MSB;
		usp_mode1 &= ~SIRFSOC_USP_LSB;
	} else {
		regval &= ~SIRFSOC_SPI_TRAN_MSB;
		usp_mode1 |= SIRFSOC_USP_LSB;
	}
	if (spi->mode & SPI_CPOL) {
		regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
		usp_mode1 |= SIRFSOC_USP_SCLK_IDLE_STAT;
	} else {
		regval &= ~SIRFSOC_SPI_CLK_IDLE_STAT;
		usp_mode1 &= ~SIRFSOC_USP_SCLK_IDLE_STAT;
	}
	/*
	 * Data should be driven at least 1/2 cycle before the fetch edge
	 * to make sure that data gets stable at the fetch edge.
	 */
	if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
	    (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) {
		regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
		usp_mode1 |= (SIRFSOC_USP_TXD_FALLING_EDGE |
				SIRFSOC_USP_RXD_FALLING_EDGE);
	} else {
		regval |= SIRFSOC_SPI_DRV_POS_EDGE;
		usp_mode1 &= ~(SIRFSOC_USP_RXD_FALLING_EDGE |
				SIRFSOC_USP_TXD_FALLING_EDGE);
	}
	writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
			SIRFSOC_SPI_FIFO_SC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
			SIRFSOC_SPI_FIFO_LC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
			SIRFSOC_SPI_FIFO_HC_OFFSET),
		sspi->base + sspi->regs->txfifo_level_chk);
	writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
			SIRFSOC_SPI_FIFO_SC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
			SIRFSOC_SPI_FIFO_LC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
			SIRFSOC_SPI_FIFO_HC_OFFSET),
		sspi->base + sspi->regs->rxfifo_level_chk);
	/*
	 * Never select hardware CS mode here: in hardware CS mode the CS
	 * signal cannot be controlled by the driver.
	 */
	switch (sspi->type) {
	case SIRF_REAL_SPI:
		regval |= SIRFSOC_SPI_CS_IO_MODE;
		writel(regval, sspi->base + sspi->regs->spi_ctrl);
		break;
	case SIRF_USP_SPI_P2:
	case SIRF_USP_SPI_A7:
		usp_mode1 |= SIRFSOC_USP_SYNC_MODE;
		usp_mode1 |= SIRFSOC_USP_TFS_IO_MODE;
		usp_mode1 &= ~SIRFSOC_USP_TFS_IO_INPUT;
		writel(usp_mode1, sspi->base + sspi->regs->usp_mode1);
		break;
	}

	return 0;
}

static int
spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	u8 bits_per_word = 0;
	int hz = 0;
	u32 regval, txfifo_ctrl, rxfifo_ctrl, tx_frm_ctl, rx_frm_ctl, usp_mode2;

	sspi = spi_master_get_devdata(spi->master);

	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
	hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;

	/* bit rate = ctrl_freq / (2 * (divisor + 1)) */
	usp_mode2 = regval = (sspi->ctrl_freq / (2 * hz)) - 1;
	if (regval > 0xFFFF || regval < 0) {
		dev_err(&spi->dev, "Speed %d not supported\n", hz);
		return -EINVAL;
	}
	switch (bits_per_word) {
	case 8:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
		sspi->rx_word = spi_sirfsoc_rx_word_u8;
		sspi->tx_word = spi_sirfsoc_tx_word_u8;
		break;
	case 12:
	case 16:
		regval |= (bits_per_word == 12) ?
			SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
			SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
		sspi->rx_word = spi_sirfsoc_rx_word_u16;
		sspi->tx_word = spi_sirfsoc_tx_word_u16;
		break;
	case 32:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
		sspi->rx_word = spi_sirfsoc_rx_word_u32;
		sspi->tx_word = spi_sirfsoc_tx_word_u32;
		break;
	default:
		dev_err(&spi->dev, "bpw %d not supported\n", bits_per_word);
		return -EINVAL;
	}
	sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
	txfifo_ctrl = (((sspi->fifo_size / 2) &
			SIRFSOC_SPI_FIFO_THD_MASK(sspi))
			<< SIRFSOC_SPI_FIFO_THD_OFFSET) |
			(sspi->word_width >> 1);
	rxfifo_ctrl = (((sspi->fifo_size / 2) &
			SIRFSOC_SPI_FIFO_THD_MASK(sspi))
			<< SIRFSOC_SPI_FIFO_THD_OFFSET) |
			(sspi->word_width >> 1);
	writel(txfifo_ctrl, sspi->base + sspi->regs->txfifo_ctrl);
	writel(rxfifo_ctrl, sspi->base + sspi->regs->rxfifo_ctrl);
	if (sspi->type == SIRF_USP_SPI_P2 ||
	    sspi->type == SIRF_USP_SPI_A7) {
		tx_frm_ctl = 0;
		tx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_TX_DATA_MASK)
				<< SIRFSOC_USP_TX_DATA_OFFSET;
		tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
				- 1) & SIRFSOC_USP_TX_SYNC_MASK) <<
				SIRFSOC_USP_TX_SYNC_OFFSET;
		tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
				+ 2 - 1) & SIRFSOC_USP_TX_FRAME_MASK) <<
				SIRFSOC_USP_TX_FRAME_OFFSET;
		tx_frm_ctl |= ((bits_per_word - 1) &
				SIRFSOC_USP_TX_SHIFTER_MASK) <<
				SIRFSOC_USP_TX_SHIFTER_OFFSET;
		rx_frm_ctl = 0;
		rx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_RX_DATA_MASK)
				<< SIRFSOC_USP_RX_DATA_OFFSET;
		rx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_RXD_DELAY_LEN
				+ 2 - 1) & SIRFSOC_USP_RX_FRAME_MASK) <<
				SIRFSOC_USP_RX_FRAME_OFFSET;
		rx_frm_ctl |= ((bits_per_word - 1)
				& SIRFSOC_USP_RX_SHIFTER_MASK) <<
				SIRFSOC_USP_RX_SHIFTER_OFFSET;
		writel(tx_frm_ctl | (((usp_mode2 >> 10) &
				SIRFSOC_USP_CLK_10_11_MASK) <<
				SIRFSOC_USP_CLK_10_11_OFFSET),
			sspi->base + sspi->regs->usp_tx_frame_ctrl);
		writel(rx_frm_ctl | (((usp_mode2 >> 12) &
				SIRFSOC_USP_CLK_12_15_MASK) <<
				SIRFSOC_USP_CLK_12_15_OFFSET),
			sspi->base + sspi->regs->usp_rx_frame_ctrl);
		writel(readl(sspi->base + sspi->regs->usp_mode2) |
				((usp_mode2 & SIRFSOC_USP_CLK_DIVISOR_MASK) <<
				SIRFSOC_USP_CLK_DIVISOR_OFFSET) |
				(SIRFSOC_USP_RXD_DELAY_LEN <<
				SIRFSOC_USP_RXD_DELAY_OFFSET) |
				(SIRFSOC_USP_TXD_DELAY_LEN <<
				SIRFSOC_USP_TXD_DELAY_OFFSET),
			sspi->base + sspi->regs->usp_mode2);
	}
	if (sspi->type == SIRF_REAL_SPI)
		writel(regval, sspi->base + sspi->regs->spi_ctrl);
	spi_sirfsoc_config_mode(spi);
	if (sspi->type == SIRF_REAL_SPI) {
		if (t && t->tx_buf && !t->rx_buf &&
			(t->len <= SIRFSOC_MAX_CMD_BYTES)) {
			sspi->tx_by_cmd = true;
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				(SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
				SIRFSOC_SPI_CMD_MODE),
				sspi->base + sspi->regs->spi_ctrl);
		} else {
			sspi->tx_by_cmd = false;
			writel(readl(sspi->base + sspi->regs->spi_ctrl) &
				~SIRFSOC_SPI_CMD_MODE,
				sspi->base + sspi->regs->spi_ctrl);
		}
	}
	if (IS_DMA_VALID(t)) {
		/* Enable DMA mode for RX, TX */
		writel(0, sspi->base + sspi->regs->tx_dma_io_ctrl);
		writel(SIRFSOC_SPI_RX_DMA_FLUSH,
			sspi->base + sspi->regs->rx_dma_io_ctrl);
	} else {
		/* Enable IO mode for RX, TX */
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + sspi->regs->tx_dma_io_ctrl);
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + sspi->regs->rx_dma_io_ctrl);
	}
	return 0;
}

static int spi_sirfsoc_setup(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;
	int ret = 0;

	sspi = spi_master_get_devdata(spi->master);
	if (spi->cs_gpio == -ENOENT)
		sspi->hw_cs = true;
	else {
		sspi->hw_cs = false;
		if (!spi_get_ctldata(spi)) {
			void *cs = kmalloc(sizeof(int), GFP_KERNEL);

			if (!cs) {
				ret = -ENOMEM;
				goto exit;
			}
			ret = gpio_is_valid(spi->cs_gpio);
			if (!ret) {
				dev_err(&spi->dev, "no valid gpio\n");
				kfree(cs);
				ret = -ENOENT;
				goto exit;
			}
			ret = gpio_request(spi->cs_gpio, DRIVER_NAME);
			if (ret) {
				dev_err(&spi->dev, "failed to request gpio\n");
				kfree(cs);
				goto exit;
			}
			spi_set_ctldata(spi, cs);
		}
	}
	spi_sirfsoc_config_mode(spi);
	spi_sirfsoc_chipselect(spi, BITBANG_CS_INACTIVE);
exit:
	return ret;
}

static void spi_sirfsoc_cleanup(struct spi_device *spi)
{
	if (spi_get_ctldata(spi)) {
		gpio_free(spi->cs_gpio);
		kfree(spi_get_ctldata(spi));
	}
}

static const struct sirf_spi_comp_data sirf_real_spi = {
	.regs = &real_spi_register,
	.type = SIRF_REAL_SPI,
	.dat_max_frm_len = 64 * 1024,
	.fifo_size = 256,
};

static const struct sirf_spi_comp_data sirf_usp_spi_p2 = {
	.regs = &usp_spi_register,
	.type = SIRF_USP_SPI_P2,
	.dat_max_frm_len = 1024 * 1024,
	.fifo_size = 128,
	.hwinit = sirfsoc_usp_hwinit,
};

static const struct sirf_spi_comp_data sirf_usp_spi_a7 = {
	.regs = &usp_spi_register,
	.type = SIRF_USP_SPI_A7,
	.dat_max_frm_len = 1024 * 1024,
	.fifo_size = 512,
	.hwinit = sirfsoc_usp_hwinit,
};

static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", .data = &sirf_real_spi},
	{ .compatible = "sirf,prima2-usp-spi", .data = &sirf_usp_spi_p2},
	{ .compatible = "sirf,atlas7-usp-spi", .data = &sirf_usp_spi_a7},
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

static int spi_sirfsoc_probe(struct platform_device *pdev)
{
	struct sirfsoc_spi *sspi;
	struct spi_master *master;
	const struct sirf_spi_comp_data *spi_comp_data;
	int irq;
	int ret;
	const struct of_device_id *match;

	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "SPI reset failed!\n");
		return ret;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI master\n");
		return -ENOMEM;
	}
	match = of_match_node(spi_sirfsoc_of_match, pdev->dev.of_node);
	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);
	spi_comp_data = match->data;
	sspi->regs = spi_comp_data->regs;
	sspi->type = spi_comp_data->type;
	sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
	sspi->fifo_size = spi_comp_data->fifo_size;
	/* fifo_size must be set before the values derived from it */
	sspi->fifo_full_offset = ilog2(sspi->fifo_size);
	sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
	sspi->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sspi->base)) {
		ret = PTR_ERR(sspi->base);
		goto free_master;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto free_master;
	}
	ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
				DRIVER_NAME, sspi);
	if (ret)
		goto free_master;

	sspi->bitbang.master = master;
	sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
	sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
	sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
	sspi->bitbang.master->setup = spi_sirfsoc_setup;
	sspi->bitbang.master->cleanup = spi_sirfsoc_cleanup;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
		SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
	master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
	sspi->bitbang.master->dev.of_node = pdev->dev.of_node;

	/* request DMA channels */
	sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(sspi->rx_chan)) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = PTR_ERR(sspi->rx_chan);
		goto free_master;
	}
	sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR(sspi->tx_chan)) {
		dev_err(&pdev->dev, "can not allocate tx dma channel\n");
		ret = PTR_ERR(sspi->tx_chan);
		goto free_rx_dma;
	}

	sspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sspi->clk)) {
		ret = PTR_ERR(sspi->clk);
		goto free_tx_dma;
	}
	clk_prepare_enable(sspi->clk);
	if (spi_comp_data->hwinit)
		spi_comp_data->hwinit(sspi);
	sspi->ctrl_freq = clk_get_rate(sspi->clk);

	init_completion(&sspi->rx_done);
	init_completion(&sspi->tx_done);

	ret = spi_bitbang_start(&sspi->bitbang);
	if (ret)
		goto free_clk;
	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);

	return 0;
free_clk:
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
free_tx_dma:
	dma_release_channel(sspi->tx_chan);
free_rx_dma:
	dma_release_channel(sspi->rx_chan);
free_master:
	spi_master_put(master);

	return ret;
}

static int spi_sirfsoc_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct sirfsoc_spi *sspi;

	master = platform_get_drvdata(pdev);
	sspi = spi_master_get_devdata(master);
	spi_bitbang_stop(&sspi->bitbang);
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
	dma_release_channel(sspi->rx_chan);
	dma_release_channel(sspi->tx_chan);
	spi_master_put(master);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_sirfsoc_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	clk_disable(sspi->clk);
	return 0;
}

static int spi_sirfsoc_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);

	clk_enable(sspi->clk);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->rxfifo_op);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
			 spi_sirfsoc_resume);

static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm = &spi_sirfsoc_pm_ops,
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_AUTHOR("Qipan Li <Qipan.Li@csr.com>");
MODULE_LICENSE("GPL v2");
1/*
2 * SPI bus driver for CSR SiRFprimaII
3 *
4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
5 *
6 * Licensed under GPLv2 or later.
7 */
8
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/clk.h>
13#include <linux/completion.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/of.h>
17#include <linux/bitops.h>
18#include <linux/err.h>
19#include <linux/platform_device.h>
20#include <linux/of_gpio.h>
21#include <linux/spi/spi.h>
22#include <linux/spi/spi_bitbang.h>
23#include <linux/dmaengine.h>
24#include <linux/dma-direction.h>
25#include <linux/dma-mapping.h>
26#include <linux/reset.h>
27
28#define DRIVER_NAME "sirfsoc_spi"
29/* SPI CTRL register defines */
30#define SIRFSOC_SPI_SLV_MODE BIT(16)
31#define SIRFSOC_SPI_CMD_MODE BIT(17)
32#define SIRFSOC_SPI_CS_IO_OUT BIT(18)
33#define SIRFSOC_SPI_CS_IO_MODE BIT(19)
34#define SIRFSOC_SPI_CLK_IDLE_STAT BIT(20)
35#define SIRFSOC_SPI_CS_IDLE_STAT BIT(21)
36#define SIRFSOC_SPI_TRAN_MSB BIT(22)
37#define SIRFSOC_SPI_DRV_POS_EDGE BIT(23)
38#define SIRFSOC_SPI_CS_HOLD_TIME BIT(24)
39#define SIRFSOC_SPI_CLK_SAMPLE_MODE BIT(25)
40#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8 (0 << 26)
41#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
42#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
43#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
44#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
45#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
46#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)
47
48/* Interrupt Enable */
49#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
50#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
51#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
52#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
53#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
54#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
55#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
56#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
57#define SIRFSOC_SPI_RXFIFO_THD_INT_EN BIT(8)
58#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
59#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
60
61/* Interrupt status */
62#define SIRFSOC_SPI_RX_DONE BIT(0)
63#define SIRFSOC_SPI_TX_DONE BIT(1)
64#define SIRFSOC_SPI_RX_OFLOW BIT(2)
65#define SIRFSOC_SPI_TX_UFLOW BIT(3)
66#define SIRFSOC_SPI_RX_IO_DMA BIT(4)
67#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
68#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
69#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
70#define SIRFSOC_SPI_TXFIFO_THD_REACH BIT(9)
71#define SIRFSOC_SPI_FRM_END BIT(10)
72
73/* TX RX enable */
74#define SIRFSOC_SPI_RX_EN BIT(0)
75#define SIRFSOC_SPI_TX_EN BIT(1)
76#define SIRFSOC_SPI_CMD_TX_EN BIT(2)
77
78#define SIRFSOC_SPI_IO_MODE_SEL BIT(0)
79#define SIRFSOC_SPI_RX_DMA_FLUSH BIT(2)
80
81/* FIFO OPs */
82#define SIRFSOC_SPI_FIFO_RESET BIT(0)
83#define SIRFSOC_SPI_FIFO_START BIT(1)
84
85/* FIFO CTRL */
86#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
87#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
88#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
89/* USP related */
90#define SIRFSOC_USP_SYNC_MODE BIT(0)
91#define SIRFSOC_USP_SLV_MODE BIT(1)
92#define SIRFSOC_USP_LSB BIT(4)
93#define SIRFSOC_USP_EN BIT(5)
94#define SIRFSOC_USP_RXD_FALLING_EDGE BIT(6)
95#define SIRFSOC_USP_TXD_FALLING_EDGE BIT(7)
96#define SIRFSOC_USP_CS_HIGH_VALID BIT(9)
97#define SIRFSOC_USP_SCLK_IDLE_STAT BIT(11)
98#define SIRFSOC_USP_TFS_IO_MODE BIT(14)
99#define SIRFSOC_USP_TFS_IO_INPUT BIT(19)
100
101#define SIRFSOC_USP_RXD_DELAY_LEN_MASK 0xFF
102#define SIRFSOC_USP_TXD_DELAY_LEN_MASK 0xFF
103#define SIRFSOC_USP_RXD_DELAY_OFFSET 0
104#define SIRFSOC_USP_TXD_DELAY_OFFSET 8
105#define SIRFSOC_USP_RXD_DELAY_LEN 1
106#define SIRFSOC_USP_TXD_DELAY_LEN 1
107#define SIRFSOC_USP_CLK_DIVISOR_OFFSET 21
108#define SIRFSOC_USP_CLK_DIVISOR_MASK 0x3FF
109#define SIRFSOC_USP_CLK_10_11_MASK 0x3
110#define SIRFSOC_USP_CLK_10_11_OFFSET 30
111#define SIRFSOC_USP_CLK_12_15_MASK 0xF
112#define SIRFSOC_USP_CLK_12_15_OFFSET 24
113
114#define SIRFSOC_USP_TX_DATA_OFFSET 0
115#define SIRFSOC_USP_TX_SYNC_OFFSET 8
116#define SIRFSOC_USP_TX_FRAME_OFFSET 16
117#define SIRFSOC_USP_TX_SHIFTER_OFFSET 24
118
119#define SIRFSOC_USP_TX_DATA_MASK 0xFF
120#define SIRFSOC_USP_TX_SYNC_MASK 0xFF
121#define SIRFSOC_USP_TX_FRAME_MASK 0xFF
122#define SIRFSOC_USP_TX_SHIFTER_MASK 0x1F
123
124#define SIRFSOC_USP_RX_DATA_OFFSET 0
125#define SIRFSOC_USP_RX_FRAME_OFFSET 8
126#define SIRFSOC_USP_RX_SHIFTER_OFFSET 16
127
128#define SIRFSOC_USP_RX_DATA_MASK 0xFF
129#define SIRFSOC_USP_RX_FRAME_MASK 0xFF
130#define SIRFSOC_USP_RX_SHIFTER_MASK 0x1F
131#define SIRFSOC_USP_CS_HIGH_VALUE BIT(1)
132
133#define SIRFSOC_SPI_FIFO_SC_OFFSET 0
134#define SIRFSOC_SPI_FIFO_LC_OFFSET 10
135#define SIRFSOC_SPI_FIFO_HC_OFFSET 20
136
137#define SIRFSOC_SPI_FIFO_FULL_MASK(s) (1 << ((s)->fifo_full_offset))
138#define SIRFSOC_SPI_FIFO_EMPTY_MASK(s) (1 << ((s)->fifo_full_offset + 1))
139#define SIRFSOC_SPI_FIFO_THD_MASK(s) ((s)->fifo_size - 1)
140#define SIRFSOC_SPI_FIFO_THD_OFFSET 2
141#define SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(s, val) \
142 ((val) & (s)->fifo_level_chk_mask)
143
144enum sirf_spi_type {
145 SIRF_REAL_SPI,
146 SIRF_USP_SPI_P2,
147 SIRF_USP_SPI_A7,
148};
149
150/*
151 * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
152 * due to the limitation of dma controller
153 */
154
155#define ALIGNED(x) (!((u32)x & 0x3))
156#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
157 ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
158
159#define SIRFSOC_MAX_CMD_BYTES 4
160#define SIRFSOC_SPI_DEFAULT_FRQ 1000000
161
162struct sirf_spi_register {
163 /*SPI and USP-SPI common*/
164 u32 tx_rx_en;
165 u32 int_en;
166 u32 int_st;
167 u32 tx_dma_io_ctrl;
168 u32 tx_dma_io_len;
169 u32 txfifo_ctrl;
170 u32 txfifo_level_chk;
171 u32 txfifo_op;
172 u32 txfifo_st;
173 u32 txfifo_data;
174 u32 rx_dma_io_ctrl;
175 u32 rx_dma_io_len;
176 u32 rxfifo_ctrl;
177 u32 rxfifo_level_chk;
178 u32 rxfifo_op;
179 u32 rxfifo_st;
180 u32 rxfifo_data;
181 /*SPI self*/
182 u32 spi_ctrl;
183 u32 spi_cmd;
184 u32 spi_dummy_delay_ctrl;
185 /*USP-SPI self*/
186 u32 usp_mode1;
187 u32 usp_mode2;
188 u32 usp_tx_frame_ctrl;
189 u32 usp_rx_frame_ctrl;
190 u32 usp_pin_io_data;
191 u32 usp_risc_dsp_mode;
192 u32 usp_async_param_reg;
193 u32 usp_irda_x_mode_div;
194 u32 usp_sm_cfg;
195 u32 usp_int_en_clr;
196};
197
198static const struct sirf_spi_register real_spi_register = {
199 .tx_rx_en = 0x8,
200 .int_en = 0xc,
201 .int_st = 0x10,
202 .tx_dma_io_ctrl = 0x100,
203 .tx_dma_io_len = 0x104,
204 .txfifo_ctrl = 0x108,
205 .txfifo_level_chk = 0x10c,
206 .txfifo_op = 0x110,
207 .txfifo_st = 0x114,
208 .txfifo_data = 0x118,
209 .rx_dma_io_ctrl = 0x120,
210 .rx_dma_io_len = 0x124,
211 .rxfifo_ctrl = 0x128,
212 .rxfifo_level_chk = 0x12c,
213 .rxfifo_op = 0x130,
214 .rxfifo_st = 0x134,
215 .rxfifo_data = 0x138,
216 .spi_ctrl = 0x0,
217 .spi_cmd = 0x4,
218 .spi_dummy_delay_ctrl = 0x144,
219};
220
221static const struct sirf_spi_register usp_spi_register = {
222 .tx_rx_en = 0x10,
223 .int_en = 0x14,
224 .int_st = 0x18,
225 .tx_dma_io_ctrl = 0x100,
226 .tx_dma_io_len = 0x104,
227 .txfifo_ctrl = 0x108,
228 .txfifo_level_chk = 0x10c,
229 .txfifo_op = 0x110,
230 .txfifo_st = 0x114,
231 .txfifo_data = 0x118,
232 .rx_dma_io_ctrl = 0x120,
233 .rx_dma_io_len = 0x124,
234 .rxfifo_ctrl = 0x128,
235 .rxfifo_level_chk = 0x12c,
236 .rxfifo_op = 0x130,
237 .rxfifo_st = 0x134,
238 .rxfifo_data = 0x138,
239 .usp_mode1 = 0x0,
240 .usp_mode2 = 0x4,
241 .usp_tx_frame_ctrl = 0x8,
242 .usp_rx_frame_ctrl = 0xc,
243 .usp_pin_io_data = 0x1c,
244 .usp_risc_dsp_mode = 0x20,
245 .usp_async_param_reg = 0x24,
246 .usp_irda_x_mode_div = 0x28,
247 .usp_sm_cfg = 0x2c,
248 .usp_int_en_clr = 0x140,
249};
250
251struct sirfsoc_spi {
252 struct spi_bitbang bitbang;
253 struct completion rx_done;
254 struct completion tx_done;
255
256 void __iomem *base;
257 u32 ctrl_freq; /* SPI controller clock speed */
258 struct clk *clk;
259
260 /* rx & tx bufs from the spi_transfer */
261 const void *tx;
262 void *rx;
263
264 /* place received word into rx buffer */
265 void (*rx_word) (struct sirfsoc_spi *);
266 /* get word from tx buffer for sending */
267 void (*tx_word) (struct sirfsoc_spi *);
268
269 /* number of words left to be tranmitted/received */
270 unsigned int left_tx_word;
271 unsigned int left_rx_word;
272
273 /* rx & tx DMA channels */
274 struct dma_chan *rx_chan;
275 struct dma_chan *tx_chan;
276 dma_addr_t src_start;
277 dma_addr_t dst_start;
278 int word_width; /* in bytes */
279
280 /*
281 * if tx size is not more than 4 and rx size is NULL, use
282 * command model
283 */
284 bool tx_by_cmd;
285 bool hw_cs;
286 enum sirf_spi_type type;
287 const struct sirf_spi_register *regs;
288 unsigned int fifo_size;
289 /* fifo empty offset is (fifo full offset + 1)*/
290 unsigned int fifo_full_offset;
291 /* fifo_level_chk_mask is (fifo_size/4 - 1) */
292 unsigned int fifo_level_chk_mask;
293 unsigned int dat_max_frm_len;
294};
295
296struct sirf_spi_comp_data {
297 const struct sirf_spi_register *regs;
298 enum sirf_spi_type type;
299 unsigned int dat_max_frm_len;
300 unsigned int fifo_size;
301 void (*hwinit)(struct sirfsoc_spi *sspi);
302};
303
304static void sirfsoc_usp_hwinit(struct sirfsoc_spi *sspi)
305{
306 /* reset USP and let USP can operate */
307 writel(readl(sspi->base + sspi->regs->usp_mode1) &
308 ~SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
309 writel(readl(sspi->base + sspi->regs->usp_mode1) |
310 SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
311}
312
313static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
314{
315 u32 data;
316 u8 *rx = sspi->rx;
317
318 data = readl(sspi->base + sspi->regs->rxfifo_data);
319
320 if (rx) {
321 *rx++ = (u8) data;
322 sspi->rx = rx;
323 }
324
325 sspi->left_rx_word--;
326}
327
328static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
329{
330 u32 data = 0;
331 const u8 *tx = sspi->tx;
332
333 if (tx) {
334 data = *tx++;
335 sspi->tx = tx;
336 }
337 writel(data, sspi->base + sspi->regs->txfifo_data);
338 sspi->left_tx_word--;
339}
340
341static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
342{
343 u32 data;
344 u16 *rx = sspi->rx;
345
346 data = readl(sspi->base + sspi->regs->rxfifo_data);
347
348 if (rx) {
349 *rx++ = (u16) data;
350 sspi->rx = rx;
351 }
352
353 sspi->left_rx_word--;
354}
355
356static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
357{
358 u32 data = 0;
359 const u16 *tx = sspi->tx;
360
361 if (tx) {
362 data = *tx++;
363 sspi->tx = tx;
364 }
365
366 writel(data, sspi->base + sspi->regs->txfifo_data);
367 sspi->left_tx_word--;
368}
369
370static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
371{
372 u32 data;
373 u32 *rx = sspi->rx;
374
375 data = readl(sspi->base + sspi->regs->rxfifo_data);
376
377 if (rx) {
378 *rx++ = (u32) data;
379 sspi->rx = rx;
380 }
381
382 sspi->left_rx_word--;
383
384}
385
386static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
387{
388 u32 data = 0;
389 const u32 *tx = sspi->tx;
390
391 if (tx) {
392 data = *tx++;
393 sspi->tx = tx;
394 }
395
396 writel(data, sspi->base + sspi->regs->txfifo_data);
397 sspi->left_tx_word--;
398}
399
400static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
401{
402 struct sirfsoc_spi *sspi = dev_id;
403 u32 spi_stat;
404
405 spi_stat = readl(sspi->base + sspi->regs->int_st);
406 if (sspi->tx_by_cmd && sspi->type == SIRF_REAL_SPI
407 && (spi_stat & SIRFSOC_SPI_FRM_END)) {
408 complete(&sspi->tx_done);
409 writel(0x0, sspi->base + sspi->regs->int_en);
410 writel(readl(sspi->base + sspi->regs->int_st),
411 sspi->base + sspi->regs->int_st);
412 return IRQ_HANDLED;
413 }
414 /* Error Conditions */
415 if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
416 spi_stat & SIRFSOC_SPI_TX_UFLOW) {
417 complete(&sspi->tx_done);
418 complete(&sspi->rx_done);
419 switch (sspi->type) {
420 case SIRF_REAL_SPI:
421 case SIRF_USP_SPI_P2:
422 writel(0x0, sspi->base + sspi->regs->int_en);
423 break;
424 case SIRF_USP_SPI_A7:
425 writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
426 break;
427 }
428 writel(readl(sspi->base + sspi->regs->int_st),
429 sspi->base + sspi->regs->int_st);
430 return IRQ_HANDLED;
431 }
432 if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
433 complete(&sspi->tx_done);
434 while (!(readl(sspi->base + sspi->regs->int_st) &
435 SIRFSOC_SPI_RX_IO_DMA))
436 cpu_relax();
437 complete(&sspi->rx_done);
438 switch (sspi->type) {
439 case SIRF_REAL_SPI:
440 case SIRF_USP_SPI_P2:
441 writel(0x0, sspi->base + sspi->regs->int_en);
442 break;
443 case SIRF_USP_SPI_A7:
444 writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
445 break;
446 }
447 writel(readl(sspi->base + sspi->regs->int_st),
448 sspi->base + sspi->regs->int_st);
449
450 return IRQ_HANDLED;
451}
452
453static void spi_sirfsoc_dma_fini_callback(void *data)
454{
455 struct completion *dma_complete = data;
456
457 complete(dma_complete);
458}
459
460static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
461 struct spi_transfer *t)
462{
463 struct sirfsoc_spi *sspi;
464 int timeout = t->len * 10;
465 u32 cmd;
466
467 sspi = spi_master_get_devdata(spi->master);
468 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
469 writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
470 memcpy(&cmd, sspi->tx, t->len);
471 if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
472 cmd = cpu_to_be32(cmd) >>
473 ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
474 if (sspi->word_width == 2 && t->len == 4 &&
475 (!(spi->mode & SPI_LSB_FIRST)))
476 cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
477 writel(cmd, sspi->base + sspi->regs->spi_cmd);
478 writel(SIRFSOC_SPI_FRM_END_INT_EN,
479 sspi->base + sspi->regs->int_en);
480 writel(SIRFSOC_SPI_CMD_TX_EN,
481 sspi->base + sspi->regs->tx_rx_en);
482 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
483 dev_err(&spi->dev, "cmd transfer timeout\n");
484 return;
485 }
486 sspi->left_rx_word -= t->len;
487}
488
489static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
490 struct spi_transfer *t)
491{
492 struct sirfsoc_spi *sspi;
493 struct dma_async_tx_descriptor *rx_desc, *tx_desc;
494 int timeout = t->len * 10;
495
496 sspi = spi_master_get_devdata(spi->master);
497 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
498 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
499 switch (sspi->type) {
500 case SIRF_REAL_SPI:
501 writel(SIRFSOC_SPI_FIFO_START,
502 sspi->base + sspi->regs->rxfifo_op);
503 writel(SIRFSOC_SPI_FIFO_START,
504 sspi->base + sspi->regs->txfifo_op);
505 writel(0, sspi->base + sspi->regs->int_en);
506 break;
507 case SIRF_USP_SPI_P2:
508 writel(0x0, sspi->base + sspi->regs->rxfifo_op);
509 writel(0x0, sspi->base + sspi->regs->txfifo_op);
510 writel(0, sspi->base + sspi->regs->int_en);
511 break;
512 case SIRF_USP_SPI_A7:
513 writel(0x0, sspi->base + sspi->regs->rxfifo_op);
514 writel(0x0, sspi->base + sspi->regs->txfifo_op);
515 writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
516 break;
517 }
518 writel(readl(sspi->base + sspi->regs->int_st),
519 sspi->base + sspi->regs->int_st);
520 if (sspi->left_tx_word < sspi->dat_max_frm_len) {
521 switch (sspi->type) {
522 case SIRF_REAL_SPI:
523 writel(readl(sspi->base + sspi->regs->spi_ctrl) |
524 SIRFSOC_SPI_ENA_AUTO_CLR |
525 SIRFSOC_SPI_MUL_DAT_MODE,
526 sspi->base + sspi->regs->spi_ctrl);
527 writel(sspi->left_tx_word - 1,
528 sspi->base + sspi->regs->tx_dma_io_len);
529 writel(sspi->left_tx_word - 1,
530 sspi->base + sspi->regs->rx_dma_io_len);
531 break;
532 case SIRF_USP_SPI_P2:
533 case SIRF_USP_SPI_A7:
534 /*USP simulate SPI, tx/rx_dma_io_len indicates bytes*/
535 writel(sspi->left_tx_word * sspi->word_width,
536 sspi->base + sspi->regs->tx_dma_io_len);
537 writel(sspi->left_tx_word * sspi->word_width,
538 sspi->base + sspi->regs->rx_dma_io_len);
539 break;
540 }
541 } else {
542 if (sspi->type == SIRF_REAL_SPI)
543 writel(readl(sspi->base + sspi->regs->spi_ctrl),
544 sspi->base + sspi->regs->spi_ctrl);
545 writel(0, sspi->base + sspi->regs->tx_dma_io_len);
546 writel(0, sspi->base + sspi->regs->rx_dma_io_len);
547 }
548 sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
549 (t->tx_buf != t->rx_buf) ?
550 DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
551 rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
552 sspi->dst_start, t->len, DMA_DEV_TO_MEM,
553 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
554 rx_desc->callback = spi_sirfsoc_dma_fini_callback;
555 rx_desc->callback_param = &sspi->rx_done;
556
557 sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
558 (t->tx_buf != t->rx_buf) ?
559 DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
560 tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
561 sspi->src_start, t->len, DMA_MEM_TO_DEV,
562 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
563 tx_desc->callback = spi_sirfsoc_dma_fini_callback;
564 tx_desc->callback_param = &sspi->tx_done;
565
566 dmaengine_submit(tx_desc);
567 dmaengine_submit(rx_desc);
568 dma_async_issue_pending(sspi->tx_chan);
569 dma_async_issue_pending(sspi->rx_chan);
570 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
571 sspi->base + sspi->regs->tx_rx_en);
572 if (sspi->type == SIRF_USP_SPI_P2 ||
573 sspi->type == SIRF_USP_SPI_A7) {
574 writel(SIRFSOC_SPI_FIFO_START,
575 sspi->base + sspi->regs->rxfifo_op);
576 writel(SIRFSOC_SPI_FIFO_START,
577 sspi->base + sspi->regs->txfifo_op);
578 }
579 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
580 dev_err(&spi->dev, "transfer timeout\n");
581 dmaengine_terminate_all(sspi->rx_chan);
582 } else
583 sspi->left_rx_word = 0;
	/*
	 * We only wait for the tx-done event when transferring by DMA. For
	 * PIO, rx data is obtained by writing tx data, so once rx is done,
	 * tx has already finished.
	 */
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		if (sspi->type == SIRF_USP_SPI_P2 ||
		    sspi->type == SIRF_USP_SPI_A7)
			writel(0, sspi->base + sspi->regs->tx_rx_en);
		dmaengine_terminate_all(sspi->tx_chan);
	}
	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	/* TX, RX FIFO stop */
	writel(0, sspi->base + sspi->regs->rxfifo_op);
	writel(0, sspi->base + sspi->regs->txfifo_op);
	if (sspi->left_tx_word >= sspi->dat_max_frm_len)
		writel(0, sspi->base + sspi->regs->tx_rx_en);
	if (sspi->type == SIRF_USP_SPI_P2 ||
	    sspi->type == SIRF_USP_SPI_A7)
		writel(0, sspi->base + sspi->regs->tx_rx_en);
}

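/*
 * PIO transfer path: fill the TX FIFO, enable TX/RX, wait for the done
 * interrupts, then drain the RX FIFO; loop until all words have been
 * transferred.
 */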
static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;
	unsigned int data_units;

	sspi = spi_master_get_devdata(spi->master);
	do {
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + sspi->regs->rxfifo_op);
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + sspi->regs->txfifo_op);
		switch (sspi->type) {
		case SIRF_USP_SPI_P2:
			writel(0x0, sspi->base + sspi->regs->rxfifo_op);
			writel(0x0, sspi->base + sspi->regs->txfifo_op);
			writel(0, sspi->base + sspi->regs->int_en);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(min((sspi->left_tx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min((sspi->left_rx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_USP_SPI_A7:
			writel(0x0, sspi->base + sspi->regs->rxfifo_op);
			writel(0x0, sspi->base + sspi->regs->txfifo_op);
			writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(min((sspi->left_tx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min((sspi->left_rx_word * sspi->word_width),
				sspi->fifo_size),
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		case SIRF_REAL_SPI:
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->rxfifo_op);
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->txfifo_op);
			writel(0, sspi->base + sspi->regs->int_en);
			writel(readl(sspi->base + sspi->regs->int_st),
				sspi->base + sspi->regs->int_st);
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				SIRFSOC_SPI_MUL_DAT_MODE |
				SIRFSOC_SPI_ENA_AUTO_CLR,
				sspi->base + sspi->regs->spi_ctrl);
			data_units = sspi->fifo_size / sspi->word_width;
			writel(min(sspi->left_tx_word, data_units) - 1,
				sspi->base + sspi->regs->tx_dma_io_len);
			writel(min(sspi->left_rx_word, data_units) - 1,
				sspi->base + sspi->regs->rx_dma_io_len);
			break;
		}
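		/* Fill the TX FIFO until it is full or we run out of words. */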
		while (!((readl(sspi->base + sspi->regs->txfifo_st)
			& SIRFSOC_SPI_FIFO_FULL_MASK(sspi))) &&
			sspi->left_tx_word)
			sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
			SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RX_OFLOW_INT_EN |
			SIRFSOC_SPI_RX_IO_DMA_INT_EN,
			sspi->base + sspi->regs->int_en);
		writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + sspi->regs->tx_rx_en);
		if (sspi->type == SIRF_USP_SPI_P2 ||
		    sspi->type == SIRF_USP_SPI_A7) {
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->rxfifo_op);
			writel(SIRFSOC_SPI_FIFO_START,
				sspi->base + sspi->regs->txfifo_op);
		}
		if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
		    !wait_for_completion_timeout(&sspi->rx_done, timeout)) {
			dev_err(&spi->dev, "transfer timeout\n");
			if (sspi->type == SIRF_USP_SPI_P2 ||
			    sspi->type == SIRF_USP_SPI_A7)
				writel(0, sspi->base + sspi->regs->tx_rx_en);
			break;
		}
		while (!((readl(sspi->base + sspi->regs->rxfifo_st)
			& SIRFSOC_SPI_FIFO_EMPTY_MASK(sspi))) &&
			sspi->left_rx_word)
			sspi->rx_word(sspi);
		if (sspi->type == SIRF_USP_SPI_P2 ||
		    sspi->type == SIRF_USP_SPI_A7)
			writel(0, sspi->base + sspi->regs->tx_rx_en);
		writel(0, sspi->base + sspi->regs->rxfifo_op);
		writel(0, sspi->base + sspi->regs->txfifo_op);
	} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}

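/*
 * Dispatch one spi_transfer to the command-register, DMA or PIO path and
 * return the number of bytes that were actually transferred.
 */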
static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;

	sspi = spi_master_get_devdata(spi->master);
	sspi->tx = t->tx_buf;
	sspi->rx = t->rx_buf;
	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
	reinit_completion(&sspi->rx_done);
	reinit_completion(&sspi->tx_done);
	/*
	 * If the transfer sends data through the command register (i.e.
	 * rx_buf is NULL), just write the command data into the command
	 * register and wait for its completion.
	 */
	if (sspi->type == SIRF_REAL_SPI && sspi->tx_by_cmd)
		spi_sirfsoc_cmd_transfer(spi, t);
	else if (IS_DMA_VALID(t))
		spi_sirfsoc_dma_transfer(spi, t);
	else
		spi_sirfsoc_pio_transfer(spi, t);

	return t->len - sspi->left_rx_word * sspi->word_width;
}

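/*
 * Drive chip select either through the controller itself (hw_cs) or through
 * the GPIO handed to us in spi->cs_gpio, honouring SPI_CS_HIGH.
 */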
static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
{
	struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);

	if (sspi->hw_cs) {
		u32 regval;

		switch (sspi->type) {
		case SIRF_REAL_SPI:
			regval = readl(sspi->base + sspi->regs->spi_ctrl);
			switch (value) {
			case BITBANG_CS_ACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval |= SIRFSOC_SPI_CS_IO_OUT;
				else
					regval &= ~SIRFSOC_SPI_CS_IO_OUT;
				break;
			case BITBANG_CS_INACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval &= ~SIRFSOC_SPI_CS_IO_OUT;
				else
					regval |= SIRFSOC_SPI_CS_IO_OUT;
				break;
			}
			writel(regval, sspi->base + sspi->regs->spi_ctrl);
			break;
		case SIRF_USP_SPI_P2:
		case SIRF_USP_SPI_A7:
			regval = readl(sspi->base +
					sspi->regs->usp_pin_io_data);
			switch (value) {
			case BITBANG_CS_ACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval |= SIRFSOC_USP_CS_HIGH_VALUE;
				else
					regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
				break;
			case BITBANG_CS_INACTIVE:
				if (spi->mode & SPI_CS_HIGH)
					regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
				else
					regval |= SIRFSOC_USP_CS_HIGH_VALUE;
				break;
			}
			writel(regval,
				sspi->base + sspi->regs->usp_pin_io_data);
			break;
		}
	} else {
		switch (value) {
		case BITBANG_CS_ACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 1 : 0);
			break;
		case BITBANG_CS_INACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
			break;
		}
	}
}

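/*
 * Program clock polarity/phase, bit order and chip-select polarity into both
 * the SPI and USP register images, set the FIFO level-check thresholds, and
 * finally write back the image that matches sspi->type.
 */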
static int spi_sirfsoc_config_mode(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;
	u32 regval, usp_mode1;

	sspi = spi_master_get_devdata(spi->master);
	regval = readl(sspi->base + sspi->regs->spi_ctrl);
	usp_mode1 = readl(sspi->base + sspi->regs->usp_mode1);
	if (!(spi->mode & SPI_CS_HIGH)) {
		regval |= SIRFSOC_SPI_CS_IDLE_STAT;
		usp_mode1 &= ~SIRFSOC_USP_CS_HIGH_VALID;
	} else {
		regval &= ~SIRFSOC_SPI_CS_IDLE_STAT;
		usp_mode1 |= SIRFSOC_USP_CS_HIGH_VALID;
	}
	if (!(spi->mode & SPI_LSB_FIRST)) {
		regval |= SIRFSOC_SPI_TRAN_MSB;
		usp_mode1 &= ~SIRFSOC_USP_LSB;
	} else {
		regval &= ~SIRFSOC_SPI_TRAN_MSB;
		usp_mode1 |= SIRFSOC_USP_LSB;
	}
	if (spi->mode & SPI_CPOL) {
		regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
		usp_mode1 |= SIRFSOC_USP_SCLK_IDLE_STAT;
	} else {
		regval &= ~SIRFSOC_SPI_CLK_IDLE_STAT;
		usp_mode1 &= ~SIRFSOC_USP_SCLK_IDLE_STAT;
	}
	/*
	 * Data should be driven at least 1/2 cycle before the fetch edge
	 * to make sure that data gets stable at the fetch edge.
	 */
	if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
	    (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) {
		regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
		usp_mode1 |= (SIRFSOC_USP_TXD_FALLING_EDGE |
				SIRFSOC_USP_RXD_FALLING_EDGE);
	} else {
		regval |= SIRFSOC_SPI_DRV_POS_EDGE;
		usp_mode1 &= ~(SIRFSOC_USP_RXD_FALLING_EDGE |
				SIRFSOC_USP_TXD_FALLING_EDGE);
	}
	writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
			SIRFSOC_SPI_FIFO_SC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
			SIRFSOC_SPI_FIFO_LC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
			SIRFSOC_SPI_FIFO_HC_OFFSET),
		sspi->base + sspi->regs->txfifo_level_chk);
	writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
			SIRFSOC_SPI_FIFO_SC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
			SIRFSOC_SPI_FIFO_LC_OFFSET) |
		(SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
			SIRFSOC_SPI_FIFO_HC_OFFSET),
		sspi->base + sspi->regs->rxfifo_level_chk);
	/*
	 * Never set hardware CS mode here: in hardware CS mode the CS signal
	 * cannot be controlled by the driver.
	 */
	switch (sspi->type) {
	case SIRF_REAL_SPI:
		regval |= SIRFSOC_SPI_CS_IO_MODE;
		writel(regval, sspi->base + sspi->regs->spi_ctrl);
		break;
	case SIRF_USP_SPI_P2:
	case SIRF_USP_SPI_A7:
		usp_mode1 |= SIRFSOC_USP_SYNC_MODE;
		usp_mode1 |= SIRFSOC_USP_TFS_IO_MODE;
		usp_mode1 &= ~SIRFSOC_USP_TFS_IO_INPUT;
		writel(usp_mode1, sspi->base + sspi->regs->usp_mode1);
		break;
	}

	return 0;
}

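/*
 * Per-transfer setup: derive the bit-rate divisor, select the word width and
 * FIFO thresholds, and switch between command, DMA and IO (PIO) mode for the
 * transfer that is about to run.
 */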
static int
spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	u8 bits_per_word = 0;
	int hz = 0;
	u32 regval, txfifo_ctrl, rxfifo_ctrl, tx_frm_ctl, rx_frm_ctl, usp_mode2;

	sspi = spi_master_get_devdata(spi->master);

	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
	hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;

	usp_mode2 = regval = (sspi->ctrl_freq / (2 * hz)) - 1;
	if (regval > 0xFFFF || regval < 0) {
		dev_err(&spi->dev, "Speed %d not supported\n", hz);
		return -EINVAL;
	}
	switch (bits_per_word) {
	case 8:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
		sspi->rx_word = spi_sirfsoc_rx_word_u8;
		sspi->tx_word = spi_sirfsoc_tx_word_u8;
		break;
	case 12:
	case 16:
		regval |= (bits_per_word == 12) ?
			SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
			SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
		sspi->rx_word = spi_sirfsoc_rx_word_u16;
		sspi->tx_word = spi_sirfsoc_tx_word_u16;
		break;
	case 32:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
		sspi->rx_word = spi_sirfsoc_rx_word_u32;
		sspi->tx_word = spi_sirfsoc_tx_word_u32;
		break;
	default:
		dev_err(&spi->dev, "bpw %d not supported\n", bits_per_word);
		return -EINVAL;
	}
	sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
	txfifo_ctrl = (((sspi->fifo_size / 2) &
			SIRFSOC_SPI_FIFO_THD_MASK(sspi))
			<< SIRFSOC_SPI_FIFO_THD_OFFSET) |
			(sspi->word_width >> 1);
	rxfifo_ctrl = (((sspi->fifo_size / 2) &
			SIRFSOC_SPI_FIFO_THD_MASK(sspi))
			<< SIRFSOC_SPI_FIFO_THD_OFFSET) |
			(sspi->word_width >> 1);
	writel(txfifo_ctrl, sspi->base + sspi->regs->txfifo_ctrl);
	writel(rxfifo_ctrl, sspi->base + sspi->regs->rxfifo_ctrl);
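	/*
	 * USP variants: program the data/sync/frame/shifter lengths and
	 * spread the clock divisor bits across mode2 and the two
	 * frame-control registers.
	 */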
	if (sspi->type == SIRF_USP_SPI_P2 ||
	    sspi->type == SIRF_USP_SPI_A7) {
		tx_frm_ctl = 0;
		tx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_TX_DATA_MASK)
				<< SIRFSOC_USP_TX_DATA_OFFSET;
		tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
				- 1) & SIRFSOC_USP_TX_SYNC_MASK) <<
				SIRFSOC_USP_TX_SYNC_OFFSET;
		tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
				+ 2 - 1) & SIRFSOC_USP_TX_FRAME_MASK) <<
				SIRFSOC_USP_TX_FRAME_OFFSET;
		tx_frm_ctl |= ((bits_per_word - 1) &
				SIRFSOC_USP_TX_SHIFTER_MASK) <<
				SIRFSOC_USP_TX_SHIFTER_OFFSET;
		rx_frm_ctl = 0;
		rx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_RX_DATA_MASK)
				<< SIRFSOC_USP_RX_DATA_OFFSET;
		rx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_RXD_DELAY_LEN
				+ 2 - 1) & SIRFSOC_USP_RX_FRAME_MASK) <<
				SIRFSOC_USP_RX_FRAME_OFFSET;
		rx_frm_ctl |= ((bits_per_word - 1)
				& SIRFSOC_USP_RX_SHIFTER_MASK) <<
				SIRFSOC_USP_RX_SHIFTER_OFFSET;
		writel(tx_frm_ctl | (((usp_mode2 >> 10) &
				SIRFSOC_USP_CLK_10_11_MASK) <<
				SIRFSOC_USP_CLK_10_11_OFFSET),
			sspi->base + sspi->regs->usp_tx_frame_ctrl);
		writel(rx_frm_ctl | (((usp_mode2 >> 12) &
				SIRFSOC_USP_CLK_12_15_MASK) <<
				SIRFSOC_USP_CLK_12_15_OFFSET),
			sspi->base + sspi->regs->usp_rx_frame_ctrl);
		writel(readl(sspi->base + sspi->regs->usp_mode2) |
				((usp_mode2 & SIRFSOC_USP_CLK_DIVISOR_MASK) <<
				SIRFSOC_USP_CLK_DIVISOR_OFFSET) |
				(SIRFSOC_USP_RXD_DELAY_LEN <<
				SIRFSOC_USP_RXD_DELAY_OFFSET) |
				(SIRFSOC_USP_TXD_DELAY_LEN <<
				SIRFSOC_USP_TXD_DELAY_OFFSET),
			sspi->base + sspi->regs->usp_mode2);
	}
	if (sspi->type == SIRF_REAL_SPI)
		writel(regval, sspi->base + sspi->regs->spi_ctrl);
	spi_sirfsoc_config_mode(spi);
	if (sspi->type == SIRF_REAL_SPI) {
		if (t && t->tx_buf && !t->rx_buf &&
		    (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
			sspi->tx_by_cmd = true;
			writel(readl(sspi->base + sspi->regs->spi_ctrl) |
				(SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
				SIRFSOC_SPI_CMD_MODE),
				sspi->base + sspi->regs->spi_ctrl);
		} else {
			sspi->tx_by_cmd = false;
			writel(readl(sspi->base + sspi->regs->spi_ctrl) &
				~SIRFSOC_SPI_CMD_MODE,
				sspi->base + sspi->regs->spi_ctrl);
		}
	}
	if (IS_DMA_VALID(t)) {
		/* Enable DMA mode for RX, TX */
		writel(0, sspi->base + sspi->regs->tx_dma_io_ctrl);
		writel(SIRFSOC_SPI_RX_DMA_FLUSH,
			sspi->base + sspi->regs->rx_dma_io_ctrl);
	} else {
		/* Enable IO mode for RX, TX */
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + sspi->regs->tx_dma_io_ctrl);
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + sspi->regs->rx_dma_io_ctrl);
	}
	return 0;
}

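/*
 * One-time per-device setup: pick hardware CS when no cs_gpio is given,
 * otherwise claim the GPIO and remember it via the controller state.
 */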
static int spi_sirfsoc_setup(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;
	int ret = 0;

	sspi = spi_master_get_devdata(spi->master);
	if (spi->cs_gpio == -ENOENT)
		sspi->hw_cs = true;
	else {
		sspi->hw_cs = false;
		if (!spi_get_ctldata(spi)) {
			void *cs = kmalloc(sizeof(int), GFP_KERNEL);
			if (!cs) {
				ret = -ENOMEM;
				goto exit;
			}
			ret = gpio_is_valid(spi->cs_gpio);
			if (!ret) {
				dev_err(&spi->dev, "no valid gpio\n");
				kfree(cs);
				ret = -ENOENT;
				goto exit;
			}
			ret = gpio_request(spi->cs_gpio, DRIVER_NAME);
			if (ret) {
				dev_err(&spi->dev, "failed to request gpio\n");
				kfree(cs);
				goto exit;
			}
			spi_set_ctldata(spi, cs);
		}
	}
	spi_sirfsoc_config_mode(spi);
	spi_sirfsoc_chipselect(spi, BITBANG_CS_INACTIVE);
exit:
	return ret;
}

static void spi_sirfsoc_cleanup(struct spi_device *spi)
{
	if (spi_get_ctldata(spi)) {
		gpio_free(spi->cs_gpio);
		kfree(spi_get_ctldata(spi));
	}
}

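/*
 * Per-compatible data: register layout, FIFO depth, maximum frame length and
 * an optional hardware init hook for the USP variants.
 */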
static const struct sirf_spi_comp_data sirf_real_spi = {
	.regs = &real_spi_register,
	.type = SIRF_REAL_SPI,
	.dat_max_frm_len = 64 * 1024,
	.fifo_size = 256,
};

static const struct sirf_spi_comp_data sirf_usp_spi_p2 = {
	.regs = &usp_spi_register,
	.type = SIRF_USP_SPI_P2,
	.dat_max_frm_len = 1024 * 1024,
	.fifo_size = 128,
	.hwinit = sirfsoc_usp_hwinit,
};

static const struct sirf_spi_comp_data sirf_usp_spi_a7 = {
	.regs = &usp_spi_register,
	.type = SIRF_USP_SPI_A7,
	.dat_max_frm_len = 1024 * 1024,
	.fifo_size = 512,
	.hwinit = sirfsoc_usp_hwinit,
};

static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", .data = &sirf_real_spi},
	{ .compatible = "sirf,prima2-usp-spi", .data = &sirf_usp_spi_p2},
	{ .compatible = "sirf,atlas7-usp-spi", .data = &sirf_usp_spi_a7},
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

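/*
 * Probe: reset the block, map its registers, grab the IRQ and the rx/tx DMA
 * channels, enable the clock and register the spi_bitbang-based master.
 */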
static int spi_sirfsoc_probe(struct platform_device *pdev)
{
	struct sirfsoc_spi *sspi;
	struct spi_master *master;
	struct resource *mem_res;
	struct sirf_spi_comp_data *spi_comp_data;
	int irq;
	int ret;
	const struct of_device_id *match;

	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "SPI reset failed!\n");
		return ret;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI master\n");
		return -ENOMEM;
	}
	match = of_match_node(spi_sirfsoc_of_match, pdev->dev.of_node);
	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);
	spi_comp_data = (struct sirf_spi_comp_data *)match->data;
	sspi->regs = spi_comp_data->regs;
	sspi->type = spi_comp_data->type;
	sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
	sspi->fifo_size = spi_comp_data->fifo_size;
	/* these values are derived from fifo_size, so set them afterwards */
	sspi->fifo_full_offset = ilog2(sspi->fifo_size);
	sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sspi->base)) {
		ret = PTR_ERR(sspi->base);
		goto free_master;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto free_master;
	}
	ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
				DRIVER_NAME, sspi);
	if (ret)
		goto free_master;

	sspi->bitbang.master = master;
	sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
	sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
	sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
	sspi->bitbang.master->setup = spi_sirfsoc_setup;
	sspi->bitbang.master->cleanup = spi_sirfsoc_cleanup;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
		SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
	master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
	sspi->bitbang.master->dev.of_node = pdev->dev.of_node;

	/* request DMA channels */
	sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!sspi->rx_chan) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = -ENODEV;
		goto free_master;
	}
	sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!sspi->tx_chan) {
		dev_err(&pdev->dev, "can not allocate tx dma channel\n");
		ret = -ENODEV;
		goto free_rx_dma;
	}

	sspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sspi->clk)) {
		ret = PTR_ERR(sspi->clk);
		goto free_tx_dma;
	}
	clk_prepare_enable(sspi->clk);
	if (spi_comp_data->hwinit)
		spi_comp_data->hwinit(sspi);
	sspi->ctrl_freq = clk_get_rate(sspi->clk);

	init_completion(&sspi->rx_done);
	init_completion(&sspi->tx_done);

	ret = spi_bitbang_start(&sspi->bitbang);
	if (ret)
		goto free_clk;
	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);

	return 0;
free_clk:
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
free_tx_dma:
	dma_release_channel(sspi->tx_chan);
free_rx_dma:
	dma_release_channel(sspi->rx_chan);
free_master:
	spi_master_put(master);

	return ret;
}

static int spi_sirfsoc_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct sirfsoc_spi *sspi;

	master = platform_get_drvdata(pdev);
	sspi = spi_master_get_devdata(master);
	spi_bitbang_stop(&sspi->bitbang);
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
	dma_release_channel(sspi->rx_chan);
	dma_release_channel(sspi->tx_chan);
	spi_master_put(master);
	return 0;
}

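/*
 * System PM: quiesce the master and gate its clock on suspend; resume
 * re-enables the clock and resets and restarts both FIFOs.
 */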
#ifdef CONFIG_PM_SLEEP
static int spi_sirfsoc_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	clk_disable(sspi->clk);
	return 0;
}

static int spi_sirfsoc_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);

	clk_enable(sspi->clk);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->rxfifo_op);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
			 spi_sirfsoc_resume);

static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm = &spi_sirfsoc_pm_ops,
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_AUTHOR("Qipan Li <Qipan.Li@csr.com>");
MODULE_LICENSE("GPL v2");