1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
4 *
5 * Copyright 2016 Broadcom
6 */
7
8#include <linux/clk.h>
9#include <linux/delay.h>
10#include <linux/device.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/ioport.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/of.h>
18#include <linux/of_irq.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21#include <linux/spi/spi.h>
22#include <linux/spi/spi-mem.h>
23#include <linux/sysfs.h>
24#include <linux/types.h>
25#include "spi-bcm-qspi.h"
26
27#define DRIVER_NAME "bcm_qspi"
28
29
30/* BSPI register offsets */
31#define BSPI_REVISION_ID 0x000
32#define BSPI_SCRATCH 0x004
33#define BSPI_MAST_N_BOOT_CTRL 0x008
34#define BSPI_BUSY_STATUS 0x00c
35#define BSPI_INTR_STATUS 0x010
36#define BSPI_B0_STATUS 0x014
37#define BSPI_B0_CTRL 0x018
38#define BSPI_B1_STATUS 0x01c
39#define BSPI_B1_CTRL 0x020
40#define BSPI_STRAP_OVERRIDE_CTRL 0x024
41#define BSPI_FLEX_MODE_ENABLE 0x028
42#define BSPI_BITS_PER_CYCLE 0x02c
43#define BSPI_BITS_PER_PHASE 0x030
44#define BSPI_CMD_AND_MODE_BYTE 0x034
45#define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
46#define BSPI_BSPI_XOR_VALUE 0x03c
47#define BSPI_BSPI_XOR_ENABLE 0x040
48#define BSPI_BSPI_PIO_MODE_ENABLE 0x044
49#define BSPI_BSPI_PIO_IODIR 0x048
50#define BSPI_BSPI_PIO_DATA 0x04c
51
52/* RAF register offsets */
53#define BSPI_RAF_START_ADDR 0x100
54#define BSPI_RAF_NUM_WORDS 0x104
55#define BSPI_RAF_CTRL 0x108
56#define BSPI_RAF_FULLNESS 0x10c
57#define BSPI_RAF_WATERMARK 0x110
58#define BSPI_RAF_STATUS 0x114
59#define BSPI_RAF_READ_DATA 0x118
60#define BSPI_RAF_WORD_CNT 0x11c
61#define BSPI_RAF_CURR_ADDR 0x120
62
63/* Override mode masks */
64#define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
65#define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
66#define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
67#define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
68#define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)
69
70#define BSPI_ADDRLEN_3BYTES 3
71#define BSPI_ADDRLEN_4BYTES 4
72
73#define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)
74
75#define BSPI_RAF_CTRL_START_MASK BIT(0)
76#define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)
77
78#define BSPI_BPP_MODE_SELECT_MASK BIT(8)
79#define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
80
81#define BSPI_READ_LENGTH 256
82
83/* MSPI register offsets */
84#define MSPI_SPCR0_LSB 0x000
85#define MSPI_SPCR0_MSB 0x004
86#define MSPI_SPCR1_LSB 0x008
87#define MSPI_SPCR1_MSB 0x00c
88#define MSPI_NEWQP 0x010
89#define MSPI_ENDQP 0x014
90#define MSPI_SPCR2 0x018
91#define MSPI_MSPI_STATUS 0x020
92#define MSPI_CPTQP 0x024
93#define MSPI_SPCR3 0x028
94#define MSPI_TXRAM 0x040
95#define MSPI_RXRAM 0x0c0
96#define MSPI_CDRAM 0x140
97#define MSPI_WRITE_LOCK 0x180
98
99#define MSPI_MASTER_BIT BIT(7)
100
101#define MSPI_NUM_CDRAM 16
102#define MSPI_CDRAM_CONT_BIT BIT(7)
103#define MSPI_CDRAM_BITSE_BIT BIT(6)
104#define MSPI_CDRAM_PCS 0xf
105
106#define MSPI_SPCR2_SPE BIT(6)
107#define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
108
109#define MSPI_MSPI_STATUS_SPIF BIT(0)
110
111#define INTR_BASE_BIT_SHIFT 0x02
112#define INTR_COUNT 0x07
113
114#define NUM_CHIPSELECT 4
115#define QSPI_SPBR_MIN 8U
116#define QSPI_SPBR_MAX 255U
117
118#define OPCODE_DIOR 0xBB
119#define OPCODE_QIOR 0xEB
120#define OPCODE_DIOR_4B 0xBC
121#define OPCODE_QIOR_4B 0xEC
122
123#define MAX_CMD_SIZE 6
124
125#define ADDR_4MB_MASK GENMASK(22, 0)
126
127/* stop at end of transfer, no other reason */
128#define TRANS_STATUS_BREAK_NONE 0
129/* stop at end of spi_message */
130#define TRANS_STATUS_BREAK_EOM 1
131/* stop at end of spi_transfer if delay */
132#define TRANS_STATUS_BREAK_DELAY 2
133/* stop at end of spi_transfer if cs_change */
134#define TRANS_STATUS_BREAK_CS_CHANGE 4
135/* stop if we run out of bytes */
136#define TRANS_STATUS_BREAK_NO_BYTES 8
137
138/* events that make us stop filling TX slots */
139#define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
140 TRANS_STATUS_BREAK_DELAY | \
141 TRANS_STATUS_BREAK_CS_CHANGE)
142
143/* events that make us deassert CS */
144#define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
145 TRANS_STATUS_BREAK_CS_CHANGE)
146
/* SPI bus parameters programmed into the MSPI SPCR0 registers */
struct bcm_qspi_parms {
	u32 speed_hz;		/* target SCK rate; 0 leaves divider clamped to minimum */
	u8 mode;		/* SPI_MODE_x; only CPOL/CPHA (low 2 bits) are used */
	u8 bits_per_word;	/* word size; 16 is encoded as 0 in hardware */
};
152
/* Cached BSPI transfer mode, used to skip redundant reprogramming/logging */
struct bcm_xfer_mode {
	bool flex_mode;		/* true: flex-mode registers; false: strap override */
	unsigned int width;	/* data lane count (1/2/4) */
	unsigned int addrlen;	/* address length in bytes (3 or 4) */
	unsigned int hp;	/* high-performance (dual/quad addr+mode) flag */
};
159
/* Index into bcm_qspi::base[] — the controller's register windows */
enum base_type {
	MSPI,		/* generic SPI engine */
	BSPI,		/* flash read-acceleration engine */
	CHIP_SELECT,	/* optional external CS register */
	BASEMAX,
};
166
/* How an entry in qspi_irq_tab is wired: dedicated L2 line or muxed L1 */
enum irq_source {
	SINGLE_L2,	/* default (0): one dedicated interrupt per source */
	MUXED_L1,	/* single line demuxed by bcm_qspi_l1_isr() */
};
171
/* Static description of one interrupt source (see qspi_irq_tab) */
struct bcm_qspi_irq {
	const char *irq_name;			/* DT interrupt-names entry */
	const irq_handler_t irq_handler;	/* L2 handler to register */
	int irq_source;				/* enum irq_source */
	u32 mask;				/* status bit(s) for this source */
};
178
/* Per-IRQ cookie passed as dev_id so handlers can find both table and qspi */
struct bcm_qspi_dev_id {
	const struct bcm_qspi_irq *irqp;	/* matching qspi_irq_tab entry */
	void *dev;				/* the owning struct bcm_qspi */
};
183
184
/* Progress cursor within the spi_transfer currently fed through MSPI */
struct qspi_trans {
	struct spi_transfer *trans;	/* NULL once the transfer is consumed */
	int byte;			/* next byte offset within trans */
	bool mspi_last_trans;		/* false while a follow-up transfer is queued */
};
190
/*
 * Per-controller state.  MSPI handles generic SPI transfers; the optional
 * BSPI block (bspi_mode) accelerates flash reads through the RAF FIFO.
 */
struct bcm_qspi {
	struct platform_device *pdev;
	struct spi_master *master;
	struct clk *clk;
	u32 base_clk;		/* input clock rate used to derive the SPBR divider */
	u32 max_speed_hz;
	void __iomem *base[BASEMAX];	/* register windows, indexed by enum base_type */

	/* Some SoCs provide custom interrupt status register(s) */
	struct bcm_qspi_soc_intc *soc_intc;

	struct bcm_qspi_parms last_parms;	/* last parameters written to hardware */
	struct qspi_trans trans_pos;		/* cursor into the in-flight transfer */
	int curr_cs;				/* currently selected chip select */
	int bspi_maj_rev;			/* from BSPI_REVISION_ID */
	int bspi_min_rev;
	int bspi_enabled;			/* BSPI owns the flash interface */
	const struct spi_mem_op *bspi_rf_op;	/* in-flight RAF read, NULL when idle */
	u32 bspi_rf_op_idx;			/* word index into the read buffer */
	u32 bspi_rf_op_len;			/* bytes still expected */
	u32 bspi_rf_op_status;			/* set -EIO by the error ISR */
	struct bcm_xfer_mode xfer_mode;		/* cached BSPI mode */
	u32 s3_strap_override_ctrl;		/* strap override saved across suspend */
	bool bspi_mode;				/* controller has a BSPI block */
	bool big_endian;			/* register endianness for readl/writel */
	int num_irqs;
	struct bcm_qspi_dev_id *dev_ids;	/* one cookie per requested IRQ */
	struct completion mspi_done;
	struct completion bspi_done;
};
221
222static inline bool has_bspi(struct bcm_qspi *qspi)
223{
224 return qspi->bspi_mode;
225}
226
227/* Read qspi controller register*/
228static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
229 unsigned int offset)
230{
231 return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
232}
233
234/* Write qspi controller register*/
235static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
236 unsigned int offset, unsigned int data)
237{
238 bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
239}
240
241/* BSPI helpers */
242static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
243{
244 int i;
245
246 /* this should normally finish within 10us */
247 for (i = 0; i < 1000; i++) {
248 if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
249 return 0;
250 udelay(1);
251 }
252 dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
253 return -EIO;
254}
255
256static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
257{
258 if (qspi->bspi_maj_rev < 4)
259 return true;
260 return false;
261}
262
/*
 * Invalidate both BSPI prefetch buffers.  The flush bit is edge-triggered,
 * so each control register is pulsed 1 then 0 after waiting for idle.
 */
static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	/* Force rising edge for the b0/b1 'flush' field */
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
272
273static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
274{
275 return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
276 BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
277}
278
/* Pop one 32-bit word from the RAF linear-read FIFO */
static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
{
	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);

	/* BSPI v3 LR is LE only, convert data to host endianness */
	if (bcm_qspi_bspi_ver_three(qspi))
		data = le32_to_cpu(data);

	return data;
}
289
/* Kick off a RAF linear-read session; the engine must be idle first */
static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_START_MASK);
}
296
/* Abort the current RAF session and discard any prefetched data */
static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
{
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_CLEAR_MASK);
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
}
303
/*
 * Drain the RAF FIFO into the current spi-mem read buffer.
 *
 * Whole words go straight into the buffer while it is 4-byte aligned and
 * at least 4 bytes remain; the tail (or an unaligned buffer) is copied
 * out byte by byte.  Runs from the BSPI interrupt handlers with
 * qspi->bspi_rf_op set; updates bspi_rf_op_idx/len as it goes.
 */
static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
{
	u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
	u32 data = 0;

	dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
		qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
	while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
		data = bcm_qspi_bspi_lr_read_fifo(qspi);
		if (likely(qspi->bspi_rf_op_len >= 4) &&
		    IS_ALIGNED((uintptr_t)buf, 4)) {
			buf[qspi->bspi_rf_op_idx++] = data;
			qspi->bspi_rf_op_len -= 4;
		} else {
			/* tail: emit the remaining bytes LSB-first (LE order) */
			u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];

			data = cpu_to_le32(data);
			while (qspi->bspi_rf_op_len) {
				*cbuf++ = (u8)data;
				data >>= 8;
				qspi->bspi_rf_op_len--;
			}
		}
	}
}
330
/*
 * Program the BSPI flex-mode registers for one opcode: @bpp (dummy/mode
 * bit counts plus address/mode select bits), @bpc (lane width per phase)
 * and the command byte.  Flex mode is disabled while the registers are
 * updated and only (re-)enabled last, per @flex_mode.
 */
static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
					  int bpp, int bpc, int flex_mode)
{
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
	bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
}
340
341static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
342 const struct spi_mem_op *op, int hp)
343{
344 int bpc = 0, bpp = 0;
345 u8 command = op->cmd.opcode;
346 int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
347 int addrlen = op->addr.nbytes;
348 int flex_mode = 1;
349
350 dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
351 width, addrlen, hp);
352
353 if (addrlen == BSPI_ADDRLEN_4BYTES)
354 bpp = BSPI_BPP_ADDR_SELECT_MASK;
355
356 bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
357
358 switch (width) {
359 case SPI_NBITS_SINGLE:
360 if (addrlen == BSPI_ADDRLEN_3BYTES)
361 /* default mode, does not need flex_cmd */
362 flex_mode = 0;
363 break;
364 case SPI_NBITS_DUAL:
365 bpc = 0x00000001;
366 if (hp) {
367 bpc |= 0x00010100; /* address and mode are 2-bit */
368 bpp = BSPI_BPP_MODE_SELECT_MASK;
369 }
370 break;
371 case SPI_NBITS_QUAD:
372 bpc = 0x00000002;
373 if (hp) {
374 bpc |= 0x00020200; /* address and mode are 4-bit */
375 bpp |= BSPI_BPP_MODE_SELECT_MASK;
376 }
377 break;
378 default:
379 return -EINVAL;
380 }
381
382 bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
383
384 return 0;
385}
386
387static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
388 const struct spi_mem_op *op, int hp)
389{
390 int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
391 int addrlen = op->addr.nbytes;
392 u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
393
394 dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
395 width, addrlen, hp);
396
397 switch (width) {
398 case SPI_NBITS_SINGLE:
399 /* clear quad/dual mode */
400 data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
401 BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
402 break;
403 case SPI_NBITS_QUAD:
404 /* clear dual mode and set quad mode */
405 data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
406 data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
407 break;
408 case SPI_NBITS_DUAL:
409 /* clear quad mode set dual mode */
410 data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
411 data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
412 break;
413 default:
414 return -EINVAL;
415 }
416
417 if (addrlen == BSPI_ADDRLEN_4BYTES)
418 /* set 4byte mode*/
419 data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
420 else
421 /* clear 4 byte mode */
422 data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
423
424 /* set the override mode */
425 data |= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
426 bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
427 bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);
428
429 return 0;
430}
431
/*
 * Select how BSPI will issue @op: flex-mode registers by default, or the
 * strap-override control when the override bit is already set (v4+ only).
 * Caches width/addrlen/hp so mode changes are logged only once.
 */
static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
				  const struct spi_mem_op *op, int hp)
{
	int error = 0;
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;

	/* default mode */
	qspi->xfer_mode.flex_mode = true;

	if (!bcm_qspi_bspi_ver_three(qspi)) {
		u32 val, mask;

		val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
		mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
		/* straps already overridden (live or saved) -> no flex mode */
		if (val & mask || qspi->s3_strap_override_ctrl & mask) {
			qspi->xfer_mode.flex_mode = false;
			bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
			error = bcm_qspi_bspi_set_override(qspi, op, hp);
		}
	}

	if (qspi->xfer_mode.flex_mode)
		error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);

	if (error) {
		dev_warn(&qspi->pdev->dev,
			 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
			 width, addrlen, hp);
	} else if (qspi->xfer_mode.width != width ||
		   qspi->xfer_mode.addrlen != addrlen ||
		   qspi->xfer_mode.hp != hp) {
		/* remember the new mode and log the change */
		qspi->xfer_mode.width = width;
		qspi->xfer_mode.addrlen = addrlen;
		qspi->xfer_mode.hp = hp;
		dev_dbg(&qspi->pdev->dev,
			"cs:%d %d-lane output, %d-byte address%s\n",
			qspi->curr_cs,
			qspi->xfer_mode.width,
			qspi->xfer_mode.addrlen,
			qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
	}

	return error;
}
477
/* Hand the flash interface back to BSPI (read-acceleration path) */
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 1;
	/* MAST_N_BOOT bit 0 clear means BSPI already owns the interface */
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
		return;

	/* drop stale prefetched data before switching ownership */
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
	udelay(1);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
	udelay(1);
}
492
/* Take the flash interface away from BSPI so MSPI can drive it */
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 0;
	/* MAST_N_BOOT bit 0 set means MSPI already owns the interface */
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
		return;

	/* wait for BSPI to go idle before switching ownership */
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
	udelay(1);
}
506
507static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
508{
509 u32 rd = 0;
510 u32 wr = 0;
511
512 if (qspi->base[CHIP_SELECT]) {
513 rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
514 wr = (rd & ~0xff) | (1 << cs);
515 if (rd == wr)
516 return;
517 bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
518 usleep_range(10, 20);
519 }
520
521 dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
522 qspi->curr_cs = cs;
523}
524
525/* MSPI helpers */
/* MSPI helpers */
/* Program MSPI clock divider, word size and SPI mode from @xp */
static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
				  const struct bcm_qspi_parms *xp)
{
	u32 spcr, spbr = 0;

	if (xp->speed_hz)
		spbr = qspi->base_clk / (2 * xp->speed_hz);

	/* baud-rate divider, clamped to the hardware's valid range */
	spcr = clamp_val(spbr, QSPI_SPBR_MIN, QSPI_SPBR_MAX);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr);

	spcr = MSPI_MASTER_BIT;
	/* for 16 bit the data should be zero */
	if (xp->bits_per_word != 16)
		spcr |= xp->bits_per_word << 2;
	spcr |= xp->mode & 3;	/* CPOL/CPHA */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);

	qspi->last_parms = *xp;
}
546
547static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
548 struct spi_device *spi,
549 struct spi_transfer *trans)
550{
551 struct bcm_qspi_parms xp;
552
553 xp.speed_hz = trans->speed_hz;
554 xp.bits_per_word = trans->bits_per_word;
555 xp.mode = spi->mode;
556
557 bcm_qspi_hw_set_parms(qspi, &xp);
558}
559
560static int bcm_qspi_setup(struct spi_device *spi)
561{
562 struct bcm_qspi_parms *xp;
563
564 if (spi->bits_per_word > 16)
565 return -EINVAL;
566
567 xp = spi_get_ctldata(spi);
568 if (!xp) {
569 xp = kzalloc(sizeof(*xp), GFP_KERNEL);
570 if (!xp)
571 return -ENOMEM;
572 spi_set_ctldata(spi, xp);
573 }
574 xp->speed_hz = spi->max_speed_hz;
575 xp->mode = spi->mode;
576
577 if (spi->bits_per_word)
578 xp->bits_per_word = spi->bits_per_word;
579 else
580 xp->bits_per_word = 8;
581
582 return 0;
583}
584
585static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
586 struct qspi_trans *qt)
587{
588 if (qt->mspi_last_trans &&
589 spi_transfer_is_last(qspi->master, qt->trans))
590 return true;
591 else
592 return false;
593}
594
/*
 * Advance the transfer cursor by one word (1 or 2 bytes) and report why
 * slot filling should stop, as a TRANS_STATUS_BREAK_* bitmask filtered by
 * @flags.  Clears qt->trans once the spi_transfer is fully consumed.
 */
static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
					struct qspi_trans *qt, int flags)
{
	int ret = TRANS_STATUS_BREAK_NONE;

	/* count the last transferred bytes */
	if (qt->trans->bits_per_word <= 8)
		qt->byte++;
	else
		qt->byte += 2;

	if (qt->byte >= qt->trans->len) {
		/* we're at the end of the spi_transfer */
		/* in TX mode, need to pause for a delay or CS change */
		if (qt->trans->delay_usecs &&
		    (flags & TRANS_STATUS_BREAK_DELAY))
			ret |= TRANS_STATUS_BREAK_DELAY;
		if (qt->trans->cs_change &&
		    (flags & TRANS_STATUS_BREAK_CS_CHANGE))
			ret |= TRANS_STATUS_BREAK_CS_CHANGE;
		if (ret)
			goto done;

		dev_dbg(&qspi->pdev->dev, "advance msg exit\n");
		/* end of message vs. more transfers still queued */
		if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
			ret = TRANS_STATUS_BREAK_EOM;
		else
			ret = TRANS_STATUS_BREAK_NO_BYTES;

		qt->trans = NULL;
	}

done:
	dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
		qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
	return ret;
}
632
633static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
634{
635 u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
636
637 /* mask out reserved bits */
638 return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
639}
640
641static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
642{
643 u32 reg_offset = MSPI_RXRAM;
644 u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
645 u32 msb_offset = reg_offset + (slot << 3);
646
647 return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
648 ((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
649}
650
651static void read_from_hw(struct bcm_qspi *qspi, int slots)
652{
653 struct qspi_trans tp;
654 int slot;
655
656 bcm_qspi_disable_bspi(qspi);
657
658 if (slots > MSPI_NUM_CDRAM) {
659 /* should never happen */
660 dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
661 return;
662 }
663
664 tp = qspi->trans_pos;
665
666 for (slot = 0; slot < slots; slot++) {
667 if (tp.trans->bits_per_word <= 8) {
668 u8 *buf = tp.trans->rx_buf;
669
670 if (buf)
671 buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
672 dev_dbg(&qspi->pdev->dev, "RD %02x\n",
673 buf ? buf[tp.byte] : 0xff);
674 } else {
675 u16 *buf = tp.trans->rx_buf;
676
677 if (buf)
678 buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
679 slot);
680 dev_dbg(&qspi->pdev->dev, "RD %04x\n",
681 buf ? buf[tp.byte] : 0xffff);
682 }
683
684 update_qspi_trans_byte_count(qspi, &tp,
685 TRANS_STATUS_BREAK_NONE);
686 }
687
688 qspi->trans_pos = tp;
689}
690
691static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
692 u8 val)
693{
694 u32 reg_offset = MSPI_TXRAM + (slot << 3);
695
696 /* mask out reserved bits */
697 bcm_qspi_write(qspi, MSPI, reg_offset, val);
698}
699
700static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
701 u16 val)
702{
703 u32 reg_offset = MSPI_TXRAM;
704 u32 msb_offset = reg_offset + (slot << 3);
705 u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
706
707 bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
708 bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
709}
710
711static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
712{
713 return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
714}
715
716static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
717{
718 bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
719}
720
/*
 * Fill TXRAM/CDRAM with up to MSPI_NUM_CDRAM words from the current
 * transfer, program the queue pointers and start the MSPI engine.
 * Return number of slots written.
 */
static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
{
	struct qspi_trans tp;
	int slot = 0, tstatus = 0;
	u32 mspi_cdram = 0;

	bcm_qspi_disable_bspi(qspi);
	tp = qspi->trans_pos;
	bcm_qspi_update_parms(qspi, spi, tp.trans);

	/* Run until end of transfer or reached the max data */
	while (!tstatus && slot < MSPI_NUM_CDRAM) {
		if (tp.trans->bits_per_word <= 8) {
			const u8 *buf = tp.trans->tx_buf;
			u8 val = buf ? buf[tp.byte] : 0xff;

			write_txram_slot_u8(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
		} else {
			const u16 *buf = tp.trans->tx_buf;
			u16 val = buf ? buf[tp.byte / 2] : 0xffff;

			write_txram_slot_u16(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
		}
		mspi_cdram = MSPI_CDRAM_CONT_BIT;

		/* PCS bit 0 is cleared when BSPI is present; otherwise select
		 * the device's chip select (active-low mask)
		 */
		if (has_bspi(qspi))
			mspi_cdram &= ~1;
		else
			mspi_cdram |= (~(1 << spi->chip_select) &
				       MSPI_CDRAM_PCS);

		/* BITSE selects 16-bit words for this slot */
		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
			       MSPI_CDRAM_BITSE_BIT);

		write_cdram_slot(qspi, slot, mspi_cdram);

		tstatus = update_qspi_trans_byte_count(qspi, &tp,
						       TRANS_STATUS_BREAK_TX);
		slot++;
	}

	if (!slot) {
		dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
		goto done;
	}

	dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);

	/* drop CONT on the last slot so CS deasserts at end of message */
	if (tstatus & TRANS_STATUS_BREAK_DESELECT) {
		mspi_cdram = read_cdram_slot(qspi, slot - 1) &
			     ~MSPI_CDRAM_CONT_BIT;
		write_cdram_slot(qspi, slot - 1, mspi_cdram);
	}

	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);

	/* Must flush previous writes before starting MSPI operation */
	mb();
	/* Set cont | spe | spifie */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);

done:
	return slot;
}
791
/*
 * Execute an accelerated flash read via the BSPI RAF engine, splitting
 * the request into BSPI_READ_LENGTH-sized linear-read sessions.  Data is
 * drained and bspi_done completed from the BSPI interrupt handlers.
 */
static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
				     const struct spi_mem_op *op)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	u32 addr = 0, len, rdlen, len_words, from = 0;
	int ret = 0;
	unsigned long timeo = msecs_to_jiffies(100);
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	/* v3 hardware cannot do 4-byte addressing through this path */
	if (bcm_qspi_bspi_ver_three(qspi))
		if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
			return -EIO;

	from = op->addr.val;
	bcm_qspi_chip_select(qspi, spi->chip_select);
	bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/*
	 * when using flex mode we need to send
	 * the upper address byte to bspi
	 */
	if (bcm_qspi_bspi_ver_three(qspi) == false) {
		addr = from & 0xff000000;
		bcm_qspi_write(qspi, BSPI,
			       BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
	}

	if (!qspi->xfer_mode.flex_mode)
		addr = from;
	else
		addr = from & 0x00ffffff;

	/* v3 maps flash offsets into a remapped BSPI window */
	if (bcm_qspi_bspi_ver_three(qspi) == true)
		addr = (addr + 0xc00000) & 0xffffff;

	/*
	 * read into the entire buffer by breaking the reads
	 * into RAF buffer read lengths
	 */
	len = op->data.nbytes;
	qspi->bspi_rf_op_idx = 0;

	do {
		if (len > BSPI_READ_LENGTH)
			rdlen = BSPI_READ_LENGTH;
		else
			rdlen = len;

		reinit_completion(&qspi->bspi_done);
		bcm_qspi_enable_bspi(qspi);
		len_words = (rdlen + 3) >> 2;
		qspi->bspi_rf_op = op;
		qspi->bspi_rf_op_status = 0;
		qspi->bspi_rf_op_len = rdlen;
		dev_dbg(&qspi->pdev->dev,
			"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
		if (qspi->soc_intc) {
			/*
			 * clear soc MSPI and BSPI interrupts and enable
			 * BSPI interrupts.
			 */
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
			soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
		}

		/* Must flush previous writes before starting BSPI operation */
		mb();
		bcm_qspi_bspi_lr_start(qspi);
		if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
			ret = -ETIMEDOUT;
			break;
		}

		/* set msg return length */
		addr += rdlen;
		len -= rdlen;
	} while (len);

	return ret;
}
876
877static int bcm_qspi_transfer_one(struct spi_master *master,
878 struct spi_device *spi,
879 struct spi_transfer *trans)
880{
881 struct bcm_qspi *qspi = spi_master_get_devdata(master);
882 int slots;
883 unsigned long timeo = msecs_to_jiffies(100);
884
885 bcm_qspi_chip_select(qspi, spi->chip_select);
886 qspi->trans_pos.trans = trans;
887 qspi->trans_pos.byte = 0;
888
889 while (qspi->trans_pos.byte < trans->len) {
890 reinit_completion(&qspi->mspi_done);
891
892 slots = write_to_hw(qspi, spi);
893 if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
894 dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
895 return -ETIMEDOUT;
896 }
897
898 read_from_hw(qspi, slots);
899 }
900 bcm_qspi_enable_bspi(qspi);
901
902 return 0;
903}
904
905static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
906 const struct spi_mem_op *op)
907{
908 struct spi_master *master = spi->master;
909 struct bcm_qspi *qspi = spi_master_get_devdata(master);
910 struct spi_transfer t[2];
911 u8 cmd[6] = { };
912 int ret, i;
913
914 memset(cmd, 0, sizeof(cmd));
915 memset(t, 0, sizeof(t));
916
917 /* tx */
918 /* opcode is in cmd[0] */
919 cmd[0] = op->cmd.opcode;
920 for (i = 0; i < op->addr.nbytes; i++)
921 cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
922
923 t[0].tx_buf = cmd;
924 t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
925 t[0].bits_per_word = spi->bits_per_word;
926 t[0].tx_nbits = op->cmd.buswidth;
927 /* lets mspi know that this is not last transfer */
928 qspi->trans_pos.mspi_last_trans = false;
929 ret = bcm_qspi_transfer_one(master, spi, &t[0]);
930
931 /* rx */
932 qspi->trans_pos.mspi_last_trans = true;
933 if (!ret) {
934 /* rx */
935 t[1].rx_buf = op->data.buf.in;
936 t[1].len = op->data.nbytes;
937 t[1].rx_nbits = op->data.buswidth;
938 t[1].bits_per_word = spi->bits_per_word;
939 ret = bcm_qspi_transfer_one(master, spi, &t[1]);
940 }
941
942 return ret;
943}
944
/*
 * spi-mem .exec_op: route address-mapped flash reads through BSPI when
 * possible; fall back to plain MSPI for unaligned/short reads or 4MB
 * boundary crossings on BSPI <= v3.  Only SPI_MEM_DATA_IN ops with an
 * address are supported.
 */
static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	struct spi_device *spi = mem->spi;
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	int ret = 0;
	bool mspi_read = false;
	u32 addr = 0, len;
	u_char *buf;

	if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -ENOTSUPP;

	buf = op->data.buf.in;
	addr = op->addr.val;
	len = op->data.nbytes;

	if (bcm_qspi_bspi_ver_three(qspi) == true) {
		/*
		 * The address coming into this function is a raw flash offset.
		 * But for BSPI <= V3, we need to convert it to a remapped BSPI
		 * address. If it crosses a 4MB boundary, just revert back to
		 * using MSPI.
		 */
		addr = (addr + 0xc00000) & 0xffffff;

		if ((~ADDR_4MB_MASK & addr) ^
		    (~ADDR_4MB_MASK & (addr + len - 1)))
			mspi_read = true;
	}

	/* non-aligned and very short transfers are handled by MSPI */
	if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
	    len < 4)
		mspi_read = true;

	if (mspi_read)
		return bcm_qspi_mspi_exec_mem_op(spi, op);

	ret = bcm_qspi_bspi_set_mode(qspi, op, 0);

	if (!ret)
		ret = bcm_qspi_bspi_exec_mem_op(spi, op);

	return ret;
}
992
/* spi_master .cleanup: free the parms allocated in bcm_qspi_setup() */
static void bcm_qspi_cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}
999
/* L2 handler for MSPI completion: ack SPIF and wake bcm_qspi_transfer_one() */
static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);

	if (status & MSPI_MSPI_STATUS_SPIF) {
		struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
		/* clear interrupt */
		status &= ~MSPI_MSPI_STATUS_SPIF;
		bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
		/* also ack at the SoC-level interrupt controller, if any */
		if (qspi->soc_intc)
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
		complete(&qspi->mspi_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1019
/*
 * L2 handler for BSPI linear-read events (fullness-reached and
 * session-done): drain the RAF FIFO and, when the whole request has been
 * read, tear the session down and complete bspi_done.
 */
static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
	u32 status = qspi_dev_id->irqp->mask;

	if (qspi->bspi_enabled && qspi->bspi_rf_op) {
		bcm_qspi_bspi_lr_data_read(qspi);
		if (qspi->bspi_rf_op_len == 0) {
			/* all requested bytes received; end the session */
			qspi->bspi_rf_op = NULL;
			if (qspi->soc_intc) {
				/* disable soc BSPI interrupt */
				soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
							   false);
				/* indicate done */
				status = INTR_BSPI_LR_SESSION_DONE_MASK;
			}

			/* on error, clear the RAF session; else just flush */
			if (qspi->bspi_rf_op_status)
				bcm_qspi_bspi_lr_clear(qspi);
			else
				bcm_qspi_bspi_flush_prefetch_buffers(qspi);
		}

		if (qspi->soc_intc)
			/* clear soc BSPI interrupt */
			soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
	}

	/* only complete when this was (or became) the session-done event */
	status &= INTR_BSPI_LR_SESSION_DONE_MASK;
	if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
		complete(&qspi->bspi_done);

	return IRQ_HANDLED;
}
1056
1057static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
1058{
1059 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1060 struct bcm_qspi *qspi = qspi_dev_id->dev;
1061 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1062
1063 dev_err(&qspi->pdev->dev, "BSPI INT error\n");
1064 qspi->bspi_rf_op_status = -EIO;
1065 if (qspi->soc_intc)
1066 /* clear soc interrupt */
1067 soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);
1068
1069 complete(&qspi->bspi_done);
1070 return IRQ_HANDLED;
1071}
1072
1073static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
1074{
1075 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1076 struct bcm_qspi *qspi = qspi_dev_id->dev;
1077 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1078 irqreturn_t ret = IRQ_NONE;
1079
1080 if (soc_intc) {
1081 u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
1082
1083 if (status & MSPI_DONE)
1084 ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
1085 else if (status & BSPI_DONE)
1086 ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
1087 else if (status & BSPI_ERR)
1088 ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
1089 }
1090
1091 return ret;
1092}
1093
/*
 * Interrupt source table.  Entries without an explicit .irq_source
 * default to SINGLE_L2 (0); the final entry is the single muxed L1 line.
 */
static const struct bcm_qspi_irq qspi_irq_tab[] = {
	{
		.irq_name = "spi_lr_fullness_reached",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
	},
	{
		.irq_name = "spi_lr_session_aborted",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
	},
	{
		.irq_name = "spi_lr_impatient",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_IMPATIENT_MASK,
	},
	{
		.irq_name = "spi_lr_session_done",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_DONE_MASK,
	},
#ifdef QSPI_INT_DEBUG
	/* this interrupt is for debug purposes only, dont request irq */
	{
		.irq_name = "spi_lr_overread",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_OVERREAD_MASK,
	},
#endif
	{
		.irq_name = "mspi_done",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_DONE_MASK,
	},
	{
		.irq_name = "mspi_halted",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_HALTED_MASK,
	},
	{
		/* single muxed L1 interrupt source */
		.irq_name = "spi_l1_intr",
		.irq_handler = bcm_qspi_l1_isr,
		.irq_source = MUXED_L1,
		.mask = QSPI_INTERRUPTS_ALL,
	},
};
1141
/*
 * One-time BSPI setup: latch the block revision, disable the address XOR
 * remap on v4+, then park the block disabled with flush deasserted.
 */
static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
{
	u32 val = 0;

	val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
	qspi->bspi_maj_rev = (val >> 8) & 0xff;
	qspi->bspi_min_rev = val & 0xff;
	if (!(bcm_qspi_bspi_ver_three(qspi))) {
		/* Force mapping of BSPI address -> flash offset */
		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
	}
	/* mark enabled so disable_bspi() actually performs the switch */
	qspi->bspi_enabled = 1;
	bcm_qspi_disable_bspi(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
1159
/*
 * Bring the controller to a known state: clear the MSPI delay/queue-pointer
 * registers, enable the SPI-finished interrupt, program default transfer
 * parameters (mode 3, 8 bpw, max speed), then set up BSPI if present.
 */
static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
{
	struct bcm_qspi_parms parms;

	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
	/* 0x20 = SPIFIE; cf. the 0xe0 "cont | spe | spifie" write in write_to_hw() */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);

	parms.mode = SPI_MODE_3;
	parms.bits_per_word = 8;
	parms.speed_hz = qspi->max_speed_hz;
	bcm_qspi_hw_set_parms(qspi, &parms);

	if (has_bspi(qspi))
		bcm_qspi_bspi_init(qspi);
}
1178
/* Quiesce the controller: clear SPCR2 and release the BSPI write lock. */
static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
{
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

}
1186
/* spi-mem interface: accelerated flash reads go through exec_op */
static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
	.exec_op = bcm_qspi_exec_mem_op,
};

/* generic compatible; SoC-specific drivers match their own strings */
static const struct of_device_id bcm_qspi_of_match[] = {
	{ .compatible = "brcm,spi-bcm-qspi" },
	{},
};
MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
1196
/*
 * Common probe, called by the SoC-specific platform drivers.  @soc_intc is
 * optional: when supplied, all MSPI/BSPI interrupts are muxed onto a single
 * L1 line handled through the SoC's custom interrupt controller glue.
 *
 * Maps the "hif_mspi"/"mspi" (required), "bspi" and "cs_reg" (optional)
 * register windows, requests IRQs from qspi_irq_tab, enables the clock,
 * initializes the hardware and registers the SPI master.
 */
int bcm_qspi_probe(struct platform_device *pdev,
		   struct bcm_qspi_soc_intc *soc_intc)
{
	struct device *dev = &pdev->dev;
	struct bcm_qspi *qspi;
	struct spi_master *master;
	struct resource *res;
	int irq, ret = 0, num_ints = 0;
	u32 val;
	const char *name = NULL;
	int num_irqs = ARRAY_SIZE(qspi_irq_tab);

	/* We only support device-tree instantiation */
	if (!dev->of_node)
		return -ENODEV;

	if (!of_match_node(bcm_qspi_of_match, dev->of_node))
		return -ENODEV;

	master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
	if (!master) {
		dev_err(dev, "error allocating spi_master\n");
		return -ENOMEM;
	}

	qspi = spi_master_get_devdata(master);
	qspi->pdev = pdev;
	qspi->trans_pos.trans = NULL;
	qspi->trans_pos.byte = 0;
	qspi->trans_pos.mspi_last_trans = true;
	qspi->master = master;

	master->bus_num = -1;
	master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD;
	master->setup = bcm_qspi_setup;
	master->transfer_one = bcm_qspi_transfer_one;
	master->mem_ops = &bcm_qspi_mem_ops;
	master->cleanup = bcm_qspi_cleanup;
	master->dev.of_node = dev->of_node;
	master->num_chipselect = NUM_CHIPSELECT;

	qspi->big_endian = of_device_is_big_endian(dev->of_node);

	if (!of_property_read_u32(dev->of_node, "num-cs", &val))
		master->num_chipselect = val;

	/* MSPI window is mandatory; older DTs name it "mspi" */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
	if (!res)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "mspi");

	if (res) {
		qspi->base[MSPI] = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[MSPI])) {
			ret = PTR_ERR(qspi->base[MSPI]);
			goto qspi_resource_err;
		}
	} else {
		/* NOTE(review): ret is still 0 here, so a missing MSPI
		 * resource makes probe return 0 — confirm intended. */
		goto qspi_resource_err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
	if (res) {
		qspi->base[BSPI] = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[BSPI])) {
			ret = PTR_ERR(qspi->base[BSPI]);
			goto qspi_resource_err;
		}
		qspi->bspi_mode = true;
	} else {
		qspi->bspi_mode = false;
	}

	dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
	if (res) {
		qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
		if (IS_ERR(qspi->base[CHIP_SELECT])) {
			ret = PTR_ERR(qspi->base[CHIP_SELECT]);
			goto qspi_resource_err;
		}
	}

	qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
				GFP_KERNEL);
	if (!qspi->dev_ids) {
		ret = -ENOMEM;
		goto qspi_resource_err;
	}

	for (val = 0; val < num_irqs; val++) {
		irq = -1;
		name = qspi_irq_tab[val].irq_name;
		if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
			/* get the l2 interrupts */
			irq = platform_get_irq_byname(pdev, name);
		} else if (!num_ints && soc_intc) {
			/* all mspi, bspi intrs muxed to one L1 intr */
			irq = platform_get_irq(pdev, 0);
		}

		if (irq >= 0) {
			ret = devm_request_irq(&pdev->dev, irq,
					       qspi_irq_tab[val].irq_handler, 0,
					       name,
					       &qspi->dev_ids[val]);
			if (ret < 0) {
				dev_err(&pdev->dev, "IRQ %s not found\n", name);
				goto qspi_probe_err;
			}

			qspi->dev_ids[val].dev = qspi;
			qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
			num_ints++;
			dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
				qspi_irq_tab[val].irq_name,
				irq);
		}
	}

	if (!num_ints) {
		dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
		ret = -EINVAL;
		goto qspi_probe_err;
	}

	/*
	 * Some SoCs integrate spi controller (e.g., its interrupt bits)
	 * in specific ways
	 */
	if (soc_intc) {
		qspi->soc_intc = soc_intc;
		soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
	} else {
		qspi->soc_intc = NULL;
	}

	qspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(qspi->clk)) {
		dev_warn(dev, "unable to get clock\n");
		ret = PTR_ERR(qspi->clk);
		goto qspi_probe_err;
	}

	ret = clk_prepare_enable(qspi->clk);
	if (ret) {
		dev_err(dev, "failed to prepare clock\n");
		goto qspi_probe_err;
	}

	qspi->base_clk = clk_get_rate(qspi->clk);
	qspi->max_speed_hz = qspi->base_clk / (QSPI_SPBR_MIN * 2);

	bcm_qspi_hw_init(qspi);
	init_completion(&qspi->mspi_done);
	init_completion(&qspi->bspi_done);
	qspi->curr_cs = -1;

	platform_set_drvdata(pdev, qspi);

	qspi->xfer_mode.width = -1;
	qspi->xfer_mode.addrlen = -1;
	qspi->xfer_mode.hp = -1;

	/*
	 * NOTE(review): registration is device-managed here, yet
	 * bcm_qspi_remove() calls spi_unregister_master() by hand —
	 * that pairing risks a double unregister; confirm against the
	 * upstream use-after-free fix for this driver.
	 */
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret < 0) {
		dev_err(dev, "can't register master\n");
		goto qspi_reg_err;
	}

	return 0;

qspi_reg_err:
	bcm_qspi_hw_uninit(qspi);
	clk_disable_unprepare(qspi->clk);
qspi_probe_err:
	kfree(qspi->dev_ids);
qspi_resource_err:
	spi_master_put(master);
	return ret;
}
/* probe function to be called by SoC specific platform driver probe */
EXPORT_SYMBOL_GPL(bcm_qspi_probe);
1381
1382int bcm_qspi_remove(struct platform_device *pdev)
1383{
1384 struct bcm_qspi *qspi = platform_get_drvdata(pdev);
1385
1386 bcm_qspi_hw_uninit(qspi);
1387 clk_disable_unprepare(qspi->clk);
1388 kfree(qspi->dev_ids);
1389 spi_unregister_master(qspi->master);
1390
1391 return 0;
1392}
1393/* function to be called by SoC specific platform driver remove() */
1394EXPORT_SYMBOL_GPL(bcm_qspi_remove);
1395
1396static int __maybe_unused bcm_qspi_suspend(struct device *dev)
1397{
1398 struct bcm_qspi *qspi = dev_get_drvdata(dev);
1399
1400 /* store the override strap value */
1401 if (!bcm_qspi_bspi_ver_three(qspi))
1402 qspi->s3_strap_override_ctrl =
1403 bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
1404
1405 spi_master_suspend(qspi->master);
1406 clk_disable(qspi->clk);
1407 bcm_qspi_hw_uninit(qspi);
1408
1409 return 0;
1410};
1411
/*
 * System resume: re-program the controller, restore the chip select that
 * was active at suspend, re-enable the SoC-level MSPI interrupt if used,
 * then ungate the clock and resume the SPI core queue.
 *
 * NOTE(review): registers are re-initialized before clk_enable(); suspend
 * only called clk_disable() (not unprepare) — confirm register access is
 * safe while the clock is gated on all supported SoCs.
 */
static int __maybe_unused bcm_qspi_resume(struct device *dev)
{
	struct bcm_qspi *qspi = dev_get_drvdata(dev);
	int ret = 0;

	bcm_qspi_hw_init(qspi);
	bcm_qspi_chip_select(qspi, qspi->curr_cs);
	if (qspi->soc_intc)
		/* enable MSPI interrupt */
		qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
						 true);

	ret = clk_enable(qspi->clk);
	if (!ret)
		spi_master_resume(qspi->master);

	return ret;
}
1430
/* suspend/resume hooks exported for the SoC platform drivers */
SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);

/* pm_ops to be called by SoC specific platform driver */
EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);

MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("Broadcom QSPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
4 *
5 * Copyright 2016 Broadcom
6 */
7
8#include <linux/clk.h>
9#include <linux/delay.h>
10#include <linux/device.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/ioport.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/of.h>
18#include <linux/of_irq.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21#include <linux/spi/spi.h>
22#include <linux/spi/spi-mem.h>
23#include <linux/sysfs.h>
24#include <linux/types.h>
25#include "spi-bcm-qspi.h"
26
27#define DRIVER_NAME "bcm_qspi"
28
29
30/* BSPI register offsets */
31#define BSPI_REVISION_ID 0x000
32#define BSPI_SCRATCH 0x004
33#define BSPI_MAST_N_BOOT_CTRL 0x008
34#define BSPI_BUSY_STATUS 0x00c
35#define BSPI_INTR_STATUS 0x010
36#define BSPI_B0_STATUS 0x014
37#define BSPI_B0_CTRL 0x018
38#define BSPI_B1_STATUS 0x01c
39#define BSPI_B1_CTRL 0x020
40#define BSPI_STRAP_OVERRIDE_CTRL 0x024
41#define BSPI_FLEX_MODE_ENABLE 0x028
42#define BSPI_BITS_PER_CYCLE 0x02c
43#define BSPI_BITS_PER_PHASE 0x030
44#define BSPI_CMD_AND_MODE_BYTE 0x034
45#define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
46#define BSPI_BSPI_XOR_VALUE 0x03c
47#define BSPI_BSPI_XOR_ENABLE 0x040
48#define BSPI_BSPI_PIO_MODE_ENABLE 0x044
49#define BSPI_BSPI_PIO_IODIR 0x048
50#define BSPI_BSPI_PIO_DATA 0x04c
51
52/* RAF register offsets */
53#define BSPI_RAF_START_ADDR 0x100
54#define BSPI_RAF_NUM_WORDS 0x104
55#define BSPI_RAF_CTRL 0x108
56#define BSPI_RAF_FULLNESS 0x10c
57#define BSPI_RAF_WATERMARK 0x110
58#define BSPI_RAF_STATUS 0x114
59#define BSPI_RAF_READ_DATA 0x118
60#define BSPI_RAF_WORD_CNT 0x11c
61#define BSPI_RAF_CURR_ADDR 0x120
62
63/* Override mode masks */
64#define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
65#define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
66#define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
67#define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
68#define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)
69
70#define BSPI_ADDRLEN_3BYTES 3
71#define BSPI_ADDRLEN_4BYTES 4
72
73#define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)
74
75#define BSPI_RAF_CTRL_START_MASK BIT(0)
76#define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)
77
78#define BSPI_BPP_MODE_SELECT_MASK BIT(8)
79#define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
80
81#define BSPI_READ_LENGTH 256
82
83/* MSPI register offsets */
84#define MSPI_SPCR0_LSB 0x000
85#define MSPI_SPCR0_MSB 0x004
86#define MSPI_SPCR1_LSB 0x008
87#define MSPI_SPCR1_MSB 0x00c
88#define MSPI_NEWQP 0x010
89#define MSPI_ENDQP 0x014
90#define MSPI_SPCR2 0x018
91#define MSPI_MSPI_STATUS 0x020
92#define MSPI_CPTQP 0x024
93#define MSPI_SPCR3 0x028
94#define MSPI_REV 0x02c
95#define MSPI_TXRAM 0x040
96#define MSPI_RXRAM 0x0c0
97#define MSPI_CDRAM 0x140
98#define MSPI_WRITE_LOCK 0x180
99
100#define MSPI_MASTER_BIT BIT(7)
101
102#define MSPI_NUM_CDRAM 16
103#define MSPI_CDRAM_CONT_BIT BIT(7)
104#define MSPI_CDRAM_BITSE_BIT BIT(6)
105#define MSPI_CDRAM_PCS 0xf
106
107#define MSPI_SPCR2_SPE BIT(6)
108#define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
109
110#define MSPI_SPCR3_FASTBR BIT(0)
111#define MSPI_SPCR3_FASTDT BIT(1)
112#define MSPI_SPCR3_SYSCLKSEL_MASK GENMASK(11, 10)
113#define MSPI_SPCR3_SYSCLKSEL_27 (MSPI_SPCR3_SYSCLKSEL_MASK & \
114 ~(BIT(10) | BIT(11)))
115#define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \
116 BIT(11))
117
118#define MSPI_MSPI_STATUS_SPIF BIT(0)
119
120#define INTR_BASE_BIT_SHIFT 0x02
121#define INTR_COUNT 0x07
122
123#define NUM_CHIPSELECT 4
124#define QSPI_SPBR_MAX 255U
125#define MSPI_BASE_FREQ 27000000UL
126
127#define OPCODE_DIOR 0xBB
128#define OPCODE_QIOR 0xEB
129#define OPCODE_DIOR_4B 0xBC
130#define OPCODE_QIOR_4B 0xEC
131
132#define MAX_CMD_SIZE 6
133
134#define ADDR_4MB_MASK GENMASK(22, 0)
135
136/* stop at end of transfer, no other reason */
137#define TRANS_STATUS_BREAK_NONE 0
138/* stop at end of spi_message */
139#define TRANS_STATUS_BREAK_EOM 1
140/* stop at end of spi_transfer if delay */
141#define TRANS_STATUS_BREAK_DELAY 2
142/* stop at end of spi_transfer if cs_change */
143#define TRANS_STATUS_BREAK_CS_CHANGE 4
144/* stop if we run out of bytes */
145#define TRANS_STATUS_BREAK_NO_BYTES 8
146
147/* events that make us stop filling TX slots */
148#define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
149 TRANS_STATUS_BREAK_DELAY | \
150 TRANS_STATUS_BREAK_CS_CHANGE)
151
152/* events that make us deassert CS */
153#define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
154 TRANS_STATUS_BREAK_CS_CHANGE)
155
/* per-transfer clock/mode/word-size parameters programmed into MSPI */
struct bcm_qspi_parms {
	u32 speed_hz;
	u8 mode;
	u8 bits_per_word;
};

/* cached BSPI transfer geometry, used to skip redundant reprogramming */
struct bcm_xfer_mode {
	bool flex_mode;
	unsigned int width;
	unsigned int addrlen;
	unsigned int hp;
};

/* index into bcm_qspi.base[] for the three register windows */
enum base_type {
	MSPI,
	BSPI,
	CHIP_SELECT,
	BASEMAX,
};

/* how an entry in qspi_irq_tab is wired: own L2 line, or muxed L1 */
enum irq_source {
	SINGLE_L2,
	MUXED_L1,
};

struct bcm_qspi_irq {
	const char *irq_name;
	const irq_handler_t irq_handler;
	int irq_source;
	u32 mask;
};

/* per-IRQ cookie handed to devm_request_irq(); dev points at bcm_qspi */
struct bcm_qspi_dev_id {
	const struct bcm_qspi_irq *irqp;
	void *dev;
};


/* progress cursor within the current spi_transfer */
struct qspi_trans {
	struct spi_transfer *trans;
	int byte;
	bool mspi_last_trans;
};
199
/* driver state, allocated as spi_master devdata in bcm_qspi_probe() */
struct bcm_qspi {
	struct platform_device *pdev;
	struct spi_master *master;
	struct clk *clk;
	u32 base_clk;
	u32 max_speed_hz;
	void __iomem *base[BASEMAX];

	/* Some SoCs provide custom interrupt status register(s) */
	struct bcm_qspi_soc_intc *soc_intc;

	struct bcm_qspi_parms last_parms;
	struct qspi_trans  trans_pos;
	int curr_cs;
	int bspi_maj_rev;
	int bspi_min_rev;
	int bspi_enabled;
	/* in-progress BSPI RAF read op and its cursor/remaining length */
	const struct spi_mem_op *bspi_rf_op;
	u32 bspi_rf_op_idx;
	u32 bspi_rf_op_len;
	u32 bspi_rf_op_status;
	struct bcm_xfer_mode xfer_mode;
	/* strap override saved across S3 suspend/resume */
	u32 s3_strap_override_ctrl;
	bool bspi_mode;
	bool big_endian;
	int num_irqs;
	struct bcm_qspi_dev_id *dev_ids;
	struct completion mspi_done;
	struct completion bspi_done;
	u8 mspi_maj_rev;
	u8 mspi_min_rev;
	bool mspi_spcr3_sysclk;
};
233
/* true when a "bspi" register window was found at probe time */
static inline bool has_bspi(struct bcm_qspi *qspi)
{
	return qspi->bspi_mode;
}

/* hardware supports spcr3 and fast baud-rate */
static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi)
{
	/*
	 * NOTE(review): "maj >= 1 && min >= 5" would misclassify a
	 * hypothetical rev 2.0 (min 0 < 5) — confirm MSPI revisions
	 * never exceed 1.x.
	 */
	if (!has_bspi(qspi) &&
	    ((qspi->mspi_maj_rev >= 1) &&
	     (qspi->mspi_min_rev >= 5)))
		return true;

	return false;
}

/* hardware supports sys clk 108Mhz */
static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk ||
	    ((qspi->mspi_maj_rev >= 1) &&
	     (qspi->mspi_min_rev >= 6))))
		return true;

	return false;
}

/* smallest allowed SPBR divider: 1 with fast baud-rate, else 8 */
static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
{
	if (bcm_qspi_has_fastbr(qspi))
		return 1;
	else
		return 8;
}
268
/* Read qspi controller register; endianness handled by bcm_qspi_readl() */
static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
				unsigned int offset)
{
	return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
}

/* Write qspi controller register; endianness handled by bcm_qspi_writel() */
static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
				  unsigned int offset, unsigned int data)
{
	bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
}
282
283/* BSPI helpers */
284static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
285{
286 int i;
287
288 /* this should normally finish within 10us */
289 for (i = 0; i < 1000; i++) {
290 if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
291 return 0;
292 udelay(1);
293 }
294 dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
295 return -EIO;
296}
297
298static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
299{
300 if (qspi->bspi_maj_rev < 4)
301 return true;
302 return false;
303}
304
/*
 * Invalidate the two BSPI prefetch buffers.  Waits for the block to go
 * idle first, then pulses the B0/B1 control bits high then low.
 */
static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	/* Force rising edge for the b0/b1 'flush' field */
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
314
/* non-zero when the RAF (linear read) FIFO has no data to drain */
static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
{
	return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
				BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
}

/* pop one 32-bit word from the RAF FIFO */
static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
{
	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);

	/* BSPI v3 LR is LE only, convert data to host endianness */
	if (bcm_qspi_bspi_ver_three(qspi))
		data = le32_to_cpu(data);

	return data;
}
331
/* kick off a RAF session once the BSPI block is idle */
static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_START_MASK);
}

/* abort/clear the current RAF session and drop any prefetched data */
static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
{
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_CLEAR_MASK);
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
}
345
/*
 * Drain the RAF FIFO into the current spi_mem_op's rx buffer.  Whole
 * 32-bit words are stored while >= 4 bytes remain and the buffer is
 * word-aligned; the final (or unaligned) bytes are stored one at a time.
 */
static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
{
	u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
	u32 data = 0;

	dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
		qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
	while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
		data = bcm_qspi_bspi_lr_read_fifo(qspi);
		if (likely(qspi->bspi_rf_op_len >= 4) &&
		    IS_ALIGNED((uintptr_t)buf, 4)) {
			buf[qspi->bspi_rf_op_idx++] = data;
			qspi->bspi_rf_op_len -= 4;
		} else {
			/* Read out remaining bytes, make sure*/
			u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];

			/*
			 * NOTE(review): cpu_to_le32 on a value already in
			 * host order — presumably to make the byte-wise
			 * shift emit LSB first on BE hosts too; confirm.
			 */
			data = cpu_to_le32(data);
			while (qspi->bspi_rf_op_len) {
				*cbuf++ = (u8)data;
				data >>= 8;
				qspi->bspi_rf_op_len--;
			}
		}
	}
}
372
/*
 * Program the BSPI flex-mode registers.  Flex mode is disabled while
 * bpc/bpp/cmd are written, then (re-)enabled per @flex_mode last.
 */
static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
					  int bpp, int bpc, int flex_mode)
{
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
	bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
}
382
383static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
384 const struct spi_mem_op *op, int hp)
385{
386 int bpc = 0, bpp = 0;
387 u8 command = op->cmd.opcode;
388 int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
389 int addrlen = op->addr.nbytes;
390 int flex_mode = 1;
391
392 dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
393 width, addrlen, hp);
394
395 if (addrlen == BSPI_ADDRLEN_4BYTES)
396 bpp = BSPI_BPP_ADDR_SELECT_MASK;
397
398 bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
399
400 switch (width) {
401 case SPI_NBITS_SINGLE:
402 if (addrlen == BSPI_ADDRLEN_3BYTES)
403 /* default mode, does not need flex_cmd */
404 flex_mode = 0;
405 break;
406 case SPI_NBITS_DUAL:
407 bpc = 0x00000001;
408 if (hp) {
409 bpc |= 0x00010100; /* address and mode are 2-bit */
410 bpp = BSPI_BPP_MODE_SELECT_MASK;
411 }
412 break;
413 case SPI_NBITS_QUAD:
414 bpc = 0x00000002;
415 if (hp) {
416 bpc |= 0x00020200; /* address and mode are 4-bit */
417 bpp |= BSPI_BPP_MODE_SELECT_MASK;
418 }
419 break;
420 default:
421 return -EINVAL;
422 }
423
424 bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
425
426 return 0;
427}
428
429static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
430 const struct spi_mem_op *op, int hp)
431{
432 int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
433 int addrlen = op->addr.nbytes;
434 u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
435
436 dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
437 width, addrlen, hp);
438
439 switch (width) {
440 case SPI_NBITS_SINGLE:
441 /* clear quad/dual mode */
442 data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
443 BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
444 break;
445 case SPI_NBITS_QUAD:
446 /* clear dual mode and set quad mode */
447 data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
448 data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
449 break;
450 case SPI_NBITS_DUAL:
451 /* clear quad mode set dual mode */
452 data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
453 data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
454 break;
455 default:
456 return -EINVAL;
457 }
458
459 if (addrlen == BSPI_ADDRLEN_4BYTES)
460 /* set 4byte mode*/
461 data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
462 else
463 /* clear 4 byte mode */
464 data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
465
466 /* set the override mode */
467 data |= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
468 bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
469 bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);
470
471 return 0;
472}
473
/*
 * Select how the BSPI read for @op is configured: flex mode by default,
 * or the strap-override path on v4+ parts where the override bit is (or
 * was, pre-suspend) already set.  Caches width/addrlen/hp so unchanged
 * settings are only logged, not reprogrammed elsewhere.
 */
static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
				  const struct spi_mem_op *op, int hp)
{
	int error = 0;
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;

	/* default mode */
	qspi->xfer_mode.flex_mode = true;

	if (!bcm_qspi_bspi_ver_three(qspi)) {
		u32 val, mask;

		val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
		mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
		if (val & mask || qspi->s3_strap_override_ctrl & mask) {
			qspi->xfer_mode.flex_mode = false;
			bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
			error = bcm_qspi_bspi_set_override(qspi, op, hp);
		}
	}

	if (qspi->xfer_mode.flex_mode)
		error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);

	if (error) {
		dev_warn(&qspi->pdev->dev,
			 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
			 width, addrlen, hp);
	} else if (qspi->xfer_mode.width != width ||
		   qspi->xfer_mode.addrlen != addrlen ||
		   qspi->xfer_mode.hp != hp) {
		qspi->xfer_mode.width = width;
		qspi->xfer_mode.addrlen = addrlen;
		qspi->xfer_mode.hp = hp;
		dev_dbg(&qspi->pdev->dev,
			"cs:%d %d-lane output, %d-byte address%s\n",
			qspi->curr_cs,
			qspi->xfer_mode.width,
			qspi->xfer_mode.addrlen,
			qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
	}

	return error;
}
519
/*
 * Hand the flash bus to BSPI (MAST_N_BOOT_CTRL = 0), flushing stale
 * prefetch data first.  No-op if BSPI is absent or already owns the bus.
 */
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 1;
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
		return;

	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
	udelay(1);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
	udelay(1);
}

/*
 * Hand the flash bus to MSPI (MAST_N_BOOT_CTRL = 1) once BSPI is idle.
 * No-op if BSPI is absent or MSPI already owns the bus.
 */
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 0;
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
		return;

	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
	udelay(1);
}
548
/*
 * Record @cs as current and, when a cs_reg window exists, program its low
 * byte as a one-hot select (skipping the write if nothing changes).
 */
static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
{
	u32 rd = 0;
	u32 wr = 0;

	if (qspi->base[CHIP_SELECT]) {
		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
		wr = (rd & ~0xff) | (1 << cs);
		if (rd == wr)
			return;
		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
		/* allow the mux to settle before traffic starts */
		usleep_range(10, 20);
	}

	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
	qspi->curr_cs = cs;
}
566
/* MSPI helpers */
/*
 * Program baud rate (SPCR0_LSB), word size and SPI mode (SPCR0_MSB), and
 * the fast-baud/sysclk options (SPCR3) from @xp, caching it in last_parms.
 */
static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
				  const struct bcm_qspi_parms *xp)
{
	u32 spcr, spbr = 0;

	/* serial clock = base_clk / (2 * SPBR) */
	if (xp->speed_hz)
		spbr = qspi->base_clk / (2 * xp->speed_hz);

	spcr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr);

	if (!qspi->mspi_maj_rev)
		/* legacy controller */
		spcr = MSPI_MASTER_BIT;
	else
		spcr = 0;

	/* for 16 bit the data should be zero */
	if (xp->bits_per_word != 16)
		spcr |= xp->bits_per_word << 2;
	spcr |= xp->mode & 3;

	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);

	if (bcm_qspi_has_fastbr(qspi)) {
		spcr = 0;

		/* enable fastbr */
		spcr |=	MSPI_SPCR3_FASTBR;

		if (bcm_qspi_has_sysclk_108(qspi)) {
			/* SYSCLK_108 */
			spcr |= MSPI_SPCR3_SYSCLKSEL_108;
			qspi->base_clk = MSPI_BASE_FREQ * 4;
			/* Change spbr as we changed sysclk */
			bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, 4);
		}

		bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
	}

	qspi->last_parms = *xp;
}
611
612static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
613 struct spi_device *spi,
614 struct spi_transfer *trans)
615{
616 struct bcm_qspi_parms xp;
617
618 xp.speed_hz = trans->speed_hz;
619 xp.bits_per_word = trans->bits_per_word;
620 xp.mode = spi->mode;
621
622 bcm_qspi_hw_set_parms(qspi, &xp);
623}
624
625static int bcm_qspi_setup(struct spi_device *spi)
626{
627 struct bcm_qspi_parms *xp;
628
629 if (spi->bits_per_word > 16)
630 return -EINVAL;
631
632 xp = spi_get_ctldata(spi);
633 if (!xp) {
634 xp = kzalloc(sizeof(*xp), GFP_KERNEL);
635 if (!xp)
636 return -ENOMEM;
637 spi_set_ctldata(spi, xp);
638 }
639 xp->speed_hz = spi->max_speed_hz;
640 xp->mode = spi->mode;
641
642 if (spi->bits_per_word)
643 xp->bits_per_word = spi->bits_per_word;
644 else
645 xp->bits_per_word = 8;
646
647 return 0;
648}
649
650static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
651 struct qspi_trans *qt)
652{
653 if (qt->mspi_last_trans &&
654 spi_transfer_is_last(qspi->master, qt->trans))
655 return true;
656 else
657 return false;
658}
659
/*
 * Advance the transfer cursor by one word (1 or 2 bytes) and, at the end
 * of the spi_transfer, report why slot filling should stop: a
 * TRANS_STATUS_BREAK_* bitmask filtered by @flags, plus EOM/NO_BYTES.
 * Clears qt->trans when the transfer is exhausted.
 */
static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
					struct qspi_trans *qt, int flags)
{
	int ret = TRANS_STATUS_BREAK_NONE;

	/* count the last transferred bytes */
	if (qt->trans->bits_per_word <= 8)
		qt->byte++;
	else
		qt->byte += 2;

	if (qt->byte >= qt->trans->len) {
		/* we're at the end of the spi_transfer */
		/* in TX mode, need to pause for a delay or CS change */
		if (qt->trans->delay_usecs &&
		    (flags & TRANS_STATUS_BREAK_DELAY))
			ret |= TRANS_STATUS_BREAK_DELAY;
		if (qt->trans->cs_change &&
		    (flags & TRANS_STATUS_BREAK_CS_CHANGE))
			ret |= TRANS_STATUS_BREAK_CS_CHANGE;

		if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
			ret |= TRANS_STATUS_BREAK_EOM;
		else
			ret |= TRANS_STATUS_BREAK_NO_BYTES;

		qt->trans = NULL;
	}

	dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
		qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
	return ret;
}
693
694static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
695{
696 u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
697
698 /* mask out reserved bits */
699 return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
700}
701
702static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
703{
704 u32 reg_offset = MSPI_RXRAM;
705 u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
706 u32 msb_offset = reg_offset + (slot << 3);
707
708 return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
709 ((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
710}
711
/*
 * Copy @slots completed RXRAM slots into the current transfer's rx buffer
 * (if any), advancing the saved transfer cursor as bytes are consumed.
 */
static void read_from_hw(struct bcm_qspi *qspi, int slots)
{
	struct qspi_trans tp;
	int slot;

	bcm_qspi_disable_bspi(qspi);

	if (slots > MSPI_NUM_CDRAM) {
		/* should never happen */
		dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
		return;
	}

	/* work on a local copy of the cursor; commit it at the end */
	tp = qspi->trans_pos;

	for (slot = 0; slot < slots; slot++) {
		if (tp.trans->bits_per_word <= 8) {
			u8 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
			dev_dbg(&qspi->pdev->dev, "RD %02x\n",
				buf ? buf[tp.byte] : 0x0);
		} else {
			u16 *buf = tp.trans->rx_buf;

			if (buf)
				buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
								       slot);
			dev_dbg(&qspi->pdev->dev, "RD %04x\n",
				buf ? buf[tp.byte / 2] : 0x0);
		}

		update_qspi_trans_byte_count(qspi, &tp,
					     TRANS_STATUS_BREAK_NONE);
	}

	qspi->trans_pos = tp;
}
751
752static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
753 u8 val)
754{
755 u32 reg_offset = MSPI_TXRAM + (slot << 3);
756
757 /* mask out reserved bits */
758 bcm_qspi_write(qspi, MSPI, reg_offset, val);
759}
760
761static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
762 u16 val)
763{
764 u32 reg_offset = MSPI_TXRAM;
765 u32 msb_offset = reg_offset + (slot << 3);
766 u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
767
768 bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
769 bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
770}
771
772static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
773{
774 return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
775}
776
777static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
778{
779 bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
780}
781
/* Return number of slots written */
/*
 * Fill TXRAM/CDRAM with up to MSPI_NUM_CDRAM words of the current
 * transfer, program the queue pointers and start the MSPI engine.
 * Each CDRAM entry carries the continue bit, the (active-low, one-hot)
 * peripheral chip selects and the 16-bit-word flag; the continue bit is
 * cleared on the last slot when CS must be deasserted afterwards.
 */
static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
{
	struct qspi_trans tp;
	int slot = 0, tstatus = 0;
	u32 mspi_cdram = 0;

	bcm_qspi_disable_bspi(qspi);
	tp = qspi->trans_pos;
	bcm_qspi_update_parms(qspi, spi, tp.trans);

	/* Run until end of transfer or reached the max data */
	while (!tstatus && slot < MSPI_NUM_CDRAM) {
		if (tp.trans->bits_per_word <= 8) {
			const u8 *buf = tp.trans->tx_buf;
			u8 val = buf ? buf[tp.byte] : 0x00;

			write_txram_slot_u8(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
		} else {
			const u16 *buf = tp.trans->tx_buf;
			u16 val = buf ? buf[tp.byte / 2] : 0x0000;

			write_txram_slot_u16(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
		}
		mspi_cdram = MSPI_CDRAM_CONT_BIT;

		if (has_bspi(qspi))
			mspi_cdram &= ~1;
		else
			mspi_cdram |= (~(1 << spi->chip_select) &
				       MSPI_CDRAM_PCS);

		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
			       MSPI_CDRAM_BITSE_BIT);

		write_cdram_slot(qspi, slot, mspi_cdram);

		tstatus = update_qspi_trans_byte_count(qspi, &tp,
						       TRANS_STATUS_BREAK_TX);
		slot++;
	}

	if (!slot) {
		dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
		goto done;
	}

	dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);

	/*
	 * case 1) EOM =1, cs_change =0: SSb inactive
	 * case 2) EOM =1, cs_change =1: SSb stay active
	 * case 3) EOM =0, cs_change =0: SSb stay active
	 * case 4) EOM =0, cs_change =1: SSb inactive
	 */
	if (((tstatus & TRANS_STATUS_BREAK_DESELECT)
	     == TRANS_STATUS_BREAK_CS_CHANGE) ||
	    ((tstatus & TRANS_STATUS_BREAK_DESELECT)
	     == TRANS_STATUS_BREAK_EOM)) {
		mspi_cdram = read_cdram_slot(qspi, slot - 1) &
			     ~MSPI_CDRAM_CONT_BIT;
		write_cdram_slot(qspi, slot - 1, mspi_cdram);
	}

	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);

	/* Must flush previous writes before starting MSPI operation */
	mb();
	/* Set cont | spe | spifie */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);

done:
	return slot;
}
861
/*
 * Execute a flash read through the BSPI RAF (Read-Ahead-FIFO) engine.
 *
 * The requested range is split into BSPI_READ_LENGTH-sized chunks; each
 * chunk is programmed into the RAF registers and completion is signalled
 * from the BSPI ISR via qspi->bspi_done.  Returns 0 on success, -EIO for
 * unsupported 4-byte addressing on BSPI v3, or -ETIMEDOUT if a chunk
 * never completes.
 */
static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
				     const struct spi_mem_op *op)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	u32 addr = 0, len, rdlen, len_words, from = 0;
	int ret = 0;
	unsigned long timeo = msecs_to_jiffies(100);
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	/* BSPI v3 cannot do 4-byte addressing at all */
	if (bcm_qspi_bspi_ver_three(qspi))
		if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
			return -EIO;

	from = op->addr.val;
	if (!spi->cs_gpiod)
		bcm_qspi_chip_select(qspi, spi->chip_select);
	/* release the MSPI write lock so BSPI may access the flash */
	bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/*
	 * when using flex mode we need to send
	 * the upper address byte to bspi
	 */
	if (bcm_qspi_bspi_ver_three(qspi) == false) {
		addr = from & 0xff000000;
		bcm_qspi_write(qspi, BSPI,
			       BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
	}

	/* flex mode transfers only the low 24 bits per chunk */
	if (!qspi->xfer_mode.flex_mode)
		addr = from;
	else
		addr = from & 0x00ffffff;

	/* BSPI v3 maps flash offsets into a remapped 16MB window */
	if (bcm_qspi_bspi_ver_three(qspi) == true)
		addr = (addr + 0xc00000) & 0xffffff;

	/*
	 * read into the entire buffer by breaking the reads
	 * into RAF buffer read lengths
	 */
	len = op->data.nbytes;
	qspi->bspi_rf_op_idx = 0;

	do {
		if (len > BSPI_READ_LENGTH)
			rdlen = BSPI_READ_LENGTH;
		else
			rdlen = len;

		reinit_completion(&qspi->bspi_done);
		bcm_qspi_enable_bspi(qspi);
		/* RAF operates on 32-bit words; round the byte count up */
		len_words = (rdlen + 3) >> 2;
		/* state consumed by bcm_qspi_bspi_lr_l2_isr() */
		qspi->bspi_rf_op = op;
		qspi->bspi_rf_op_status = 0;
		qspi->bspi_rf_op_len = rdlen;
		dev_dbg(&qspi->pdev->dev,
			"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
		if (qspi->soc_intc) {
			/*
			 * clear soc MSPI and BSPI interrupts and enable
			 * BSPI interrupts.
			 */
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
			soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
		}

		/* Must flush previous writes before starting BSPI operation */
		mb();
		bcm_qspi_bspi_lr_start(qspi);
		if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
			ret = -ETIMEDOUT;
			break;
		}

		/* set msg return length */
		addr += rdlen;
		len -= rdlen;
	} while (len);

	return ret;
}
947
948static int bcm_qspi_transfer_one(struct spi_master *master,
949 struct spi_device *spi,
950 struct spi_transfer *trans)
951{
952 struct bcm_qspi *qspi = spi_master_get_devdata(master);
953 int slots;
954 unsigned long timeo = msecs_to_jiffies(100);
955
956 if (!spi->cs_gpiod)
957 bcm_qspi_chip_select(qspi, spi->chip_select);
958 qspi->trans_pos.trans = trans;
959 qspi->trans_pos.byte = 0;
960
961 while (qspi->trans_pos.byte < trans->len) {
962 reinit_completion(&qspi->mspi_done);
963
964 slots = write_to_hw(qspi, spi);
965 if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
966 dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
967 return -ETIMEDOUT;
968 }
969
970 read_from_hw(qspi, slots);
971 }
972 bcm_qspi_enable_bspi(qspi);
973
974 return 0;
975}
976
977static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
978 const struct spi_mem_op *op)
979{
980 struct spi_master *master = spi->master;
981 struct bcm_qspi *qspi = spi_master_get_devdata(master);
982 struct spi_transfer t[2];
983 u8 cmd[6] = { };
984 int ret, i;
985
986 memset(cmd, 0, sizeof(cmd));
987 memset(t, 0, sizeof(t));
988
989 /* tx */
990 /* opcode is in cmd[0] */
991 cmd[0] = op->cmd.opcode;
992 for (i = 0; i < op->addr.nbytes; i++)
993 cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
994
995 t[0].tx_buf = cmd;
996 t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
997 t[0].bits_per_word = spi->bits_per_word;
998 t[0].tx_nbits = op->cmd.buswidth;
999 /* lets mspi know that this is not last transfer */
1000 qspi->trans_pos.mspi_last_trans = false;
1001 ret = bcm_qspi_transfer_one(master, spi, &t[0]);
1002
1003 /* rx */
1004 qspi->trans_pos.mspi_last_trans = true;
1005 if (!ret) {
1006 /* rx */
1007 t[1].rx_buf = op->data.buf.in;
1008 t[1].len = op->data.nbytes;
1009 t[1].rx_nbits = op->data.buswidth;
1010 t[1].bits_per_word = spi->bits_per_word;
1011 ret = bcm_qspi_transfer_one(master, spi, &t[1]);
1012 }
1013
1014 return ret;
1015}
1016
1017static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
1018 const struct spi_mem_op *op)
1019{
1020 struct spi_device *spi = mem->spi;
1021 struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
1022 int ret = 0;
1023 bool mspi_read = false;
1024 u32 addr = 0, len;
1025 u_char *buf;
1026
1027 if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
1028 op->data.dir != SPI_MEM_DATA_IN)
1029 return -ENOTSUPP;
1030
1031 buf = op->data.buf.in;
1032 addr = op->addr.val;
1033 len = op->data.nbytes;
1034
1035 if (bcm_qspi_bspi_ver_three(qspi) == true) {
1036 /*
1037 * The address coming into this function is a raw flash offset.
1038 * But for BSPI <= V3, we need to convert it to a remapped BSPI
1039 * address. If it crosses a 4MB boundary, just revert back to
1040 * using MSPI.
1041 */
1042 addr = (addr + 0xc00000) & 0xffffff;
1043
1044 if ((~ADDR_4MB_MASK & addr) ^
1045 (~ADDR_4MB_MASK & (addr + len - 1)))
1046 mspi_read = true;
1047 }
1048
1049 /* non-aligned and very short transfers are handled by MSPI */
1050 if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
1051 len < 4)
1052 mspi_read = true;
1053
1054 if (mspi_read)
1055 return bcm_qspi_mspi_exec_mem_op(spi, op);
1056
1057 ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
1058
1059 if (!ret)
1060 ret = bcm_qspi_bspi_exec_mem_op(spi, op);
1061
1062 return ret;
1063}
1064
/* spi_master->cleanup hook: free the per-device controller data */
static void bcm_qspi_cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}
1071
1072static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
1073{
1074 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1075 struct bcm_qspi *qspi = qspi_dev_id->dev;
1076 u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
1077
1078 if (status & MSPI_MSPI_STATUS_SPIF) {
1079 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1080 /* clear interrupt */
1081 status &= ~MSPI_MSPI_STATUS_SPIF;
1082 bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
1083 if (qspi->soc_intc)
1084 soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
1085 complete(&qspi->mspi_done);
1086 return IRQ_HANDLED;
1087 }
1088
1089 return IRQ_NONE;
1090}
1091
/*
 * L2 interrupt handler for BSPI linear-read events (fullness reached /
 * session done).  Drains the RAF FIFO into the pending spi_mem_op buffer
 * and completes qspi->bspi_done once the whole chunk has been read.
 */
static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
	/* default: the per-IRQ mask this handler was registered with */
	u32 status = qspi_dev_id->irqp->mask;

	if (qspi->bspi_enabled && qspi->bspi_rf_op) {
		/* drain whatever the RAF engine has buffered so far */
		bcm_qspi_bspi_lr_data_read(qspi);
		if (qspi->bspi_rf_op_len == 0) {
			/* chunk fully read; detach the op from the ISR */
			qspi->bspi_rf_op = NULL;
			if (qspi->soc_intc) {
				/* disable soc BSPI interrupt */
				soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
							   false);
				/* indicate done */
				status = INTR_BSPI_LR_SESSION_DONE_MASK;
			}

			/* on error reset the engine, otherwise flush it */
			if (qspi->bspi_rf_op_status)
				bcm_qspi_bspi_lr_clear(qspi);
			else
				bcm_qspi_bspi_flush_prefetch_buffers(qspi);
		}

		if (qspi->soc_intc)
			/* clear soc BSPI interrupt */
			soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
	}

	/* wake the waiter only on session-done with nothing left to read */
	status &= INTR_BSPI_LR_SESSION_DONE_MASK;
	if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
		complete(&qspi->bspi_done);

	return IRQ_HANDLED;
}
1128
1129static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
1130{
1131 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1132 struct bcm_qspi *qspi = qspi_dev_id->dev;
1133 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1134
1135 dev_err(&qspi->pdev->dev, "BSPI INT error\n");
1136 qspi->bspi_rf_op_status = -EIO;
1137 if (qspi->soc_intc)
1138 /* clear soc interrupt */
1139 soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);
1140
1141 complete(&qspi->bspi_done);
1142 return IRQ_HANDLED;
1143}
1144
1145static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
1146{
1147 struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1148 struct bcm_qspi *qspi = qspi_dev_id->dev;
1149 struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1150 irqreturn_t ret = IRQ_NONE;
1151
1152 if (soc_intc) {
1153 u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
1154
1155 if (status & MSPI_DONE)
1156 ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
1157 else if (status & BSPI_DONE)
1158 ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
1159 else if (status & BSPI_ERR)
1160 ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
1161 }
1162
1163 return ret;
1164}
1165
/*
 * All QSPI interrupt sources.  Depending on the platform these are
 * requested individually by name as L2 interrupts, or multiplexed
 * behind the single "spi_l1_intr" L1 source (last entry), in which case
 * bcm_qspi_l1_isr() demuxes to the L2 handlers above.
 */
static const struct bcm_qspi_irq qspi_irq_tab[] = {
	{
		.irq_name = "spi_lr_fullness_reached",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
	},
	{
		.irq_name = "spi_lr_session_aborted",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
	},
	{
		.irq_name = "spi_lr_impatient",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_IMPATIENT_MASK,
	},
	{
		.irq_name = "spi_lr_session_done",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_DONE_MASK,
	},
#ifdef QSPI_INT_DEBUG
	/* this interrupt is for debug purposes only, dont request irq */
	{
		.irq_name = "spi_lr_overread",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_OVERREAD_MASK,
	},
#endif
	{
		.irq_name = "mspi_done",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_DONE_MASK,
	},
	{
		.irq_name = "mspi_halted",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_HALTED_MASK,
	},
	{
		/* single muxed L1 interrupt source */
		.irq_name = "spi_l1_intr",
		.irq_handler = bcm_qspi_l1_isr,
		.irq_source = MUXED_L1,
		.mask = QSPI_INTERRUPTS_ALL,
	},
};
1213
1214static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
1215{
1216 u32 val = 0;
1217
1218 val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
1219 qspi->bspi_maj_rev = (val >> 8) & 0xff;
1220 qspi->bspi_min_rev = val & 0xff;
1221 if (!(bcm_qspi_bspi_ver_three(qspi))) {
1222 /* Force mapping of BSPI address -> flash offset */
1223 bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
1224 bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
1225 }
1226 qspi->bspi_enabled = 1;
1227 bcm_qspi_disable_bspi(qspi);
1228 bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
1229 bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
1230}
1231
/*
 * Bring the controller to a known state: reset the MSPI queue and
 * control registers, apply default transfer parameters, and initialize
 * the BSPI block when present.  Called from probe and resume.
 */
static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
{
	struct bcm_qspi_parms parms;

	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
	/* 0x20: halt the MSPI engine until a transfer is queued */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);

	/* default transfer parameters: mode 3, 8 bpw, maximum clock */
	parms.mode = SPI_MODE_3;
	parms.bits_per_word = 8;
	parms.speed_hz = qspi->max_speed_hz;
	bcm_qspi_hw_set_parms(qspi, &parms);

	if (has_bspi(qspi))
		bcm_qspi_bspi_init(qspi);
}
1250
1251static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
1252{
1253 bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
1254 if (has_bspi(qspi))
1255 bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
1256
1257}
1258
/* spi-mem hook: accelerated flash reads via bcm_qspi_exec_mem_op() */
static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
	.exec_op = bcm_qspi_exec_mem_op,
};

/* Per-compatible feature flags resolved from the of_match table */
struct bcm_qspi_data {
	bool has_mspi_rev;	/* controller implements the MSPI_REV register */
	bool has_spcr3_sysclk;	/* SPCR3 sysclk support is present */
};

/* oldest parts: no MSPI_REV register, no SPCR3 */
static const struct bcm_qspi_data bcm_qspi_no_rev_data = {
	.has_mspi_rev = false,
	.has_spcr3_sysclk = false,
};

/* NOTE(review): not referenced by the of_match table below -- presumably
 * kept for SoC-specific front-end drivers; verify before removing. */
static const struct bcm_qspi_data bcm_qspi_rev_data = {
	.has_mspi_rev = true,
	.has_spcr3_sysclk = false,
};

/* newest parts (7216/7278): MSPI_REV plus SPCR3 sysclk */
static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
	.has_mspi_rev = true,
	.has_spcr3_sysclk = true,
};
1282
/* Device-tree compatibles handled by this common driver core */
static const struct of_device_id bcm_qspi_of_match[] = {
	{
		.compatible = "brcm,spi-bcm7425-qspi",
		.data = &bcm_qspi_no_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm7429-qspi",
		.data = &bcm_qspi_no_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm7435-qspi",
		.data = &bcm_qspi_no_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm-qspi",
		.data = &bcm_qspi_no_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm7216-qspi",
		.data = &bcm_qspi_spcr3_data,
	},
	{
		.compatible = "brcm,spi-bcm7278-qspi",
		.data = &bcm_qspi_spcr3_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
1311
1312int bcm_qspi_probe(struct platform_device *pdev,
1313 struct bcm_qspi_soc_intc *soc_intc)
1314{
1315 const struct of_device_id *of_id = NULL;
1316 const struct bcm_qspi_data *data;
1317 struct device *dev = &pdev->dev;
1318 struct bcm_qspi *qspi;
1319 struct spi_master *master;
1320 struct resource *res;
1321 int irq, ret = 0, num_ints = 0;
1322 u32 val;
1323 u32 rev = 0;
1324 const char *name = NULL;
1325 int num_irqs = ARRAY_SIZE(qspi_irq_tab);
1326
1327 /* We only support device-tree instantiation */
1328 if (!dev->of_node)
1329 return -ENODEV;
1330
1331 of_id = of_match_node(bcm_qspi_of_match, dev->of_node);
1332 if (!of_id)
1333 return -ENODEV;
1334
1335 data = of_id->data;
1336
1337 master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
1338 if (!master) {
1339 dev_err(dev, "error allocating spi_master\n");
1340 return -ENOMEM;
1341 }
1342
1343 qspi = spi_master_get_devdata(master);
1344
1345 qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
1346 if (IS_ERR(qspi->clk))
1347 return PTR_ERR(qspi->clk);
1348
1349 qspi->pdev = pdev;
1350 qspi->trans_pos.trans = NULL;
1351 qspi->trans_pos.byte = 0;
1352 qspi->trans_pos.mspi_last_trans = true;
1353 qspi->master = master;
1354
1355 master->bus_num = -1;
1356 master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD;
1357 master->setup = bcm_qspi_setup;
1358 master->transfer_one = bcm_qspi_transfer_one;
1359 master->mem_ops = &bcm_qspi_mem_ops;
1360 master->cleanup = bcm_qspi_cleanup;
1361 master->dev.of_node = dev->of_node;
1362 master->num_chipselect = NUM_CHIPSELECT;
1363 master->use_gpio_descriptors = true;
1364
1365 qspi->big_endian = of_device_is_big_endian(dev->of_node);
1366
1367 if (!of_property_read_u32(dev->of_node, "num-cs", &val))
1368 master->num_chipselect = val;
1369
1370 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
1371 if (!res)
1372 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1373 "mspi");
1374
1375 if (res) {
1376 qspi->base[MSPI] = devm_ioremap_resource(dev, res);
1377 if (IS_ERR(qspi->base[MSPI])) {
1378 ret = PTR_ERR(qspi->base[MSPI]);
1379 goto qspi_resource_err;
1380 }
1381 } else {
1382 goto qspi_resource_err;
1383 }
1384
1385 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
1386 if (res) {
1387 qspi->base[BSPI] = devm_ioremap_resource(dev, res);
1388 if (IS_ERR(qspi->base[BSPI])) {
1389 ret = PTR_ERR(qspi->base[BSPI]);
1390 goto qspi_resource_err;
1391 }
1392 qspi->bspi_mode = true;
1393 } else {
1394 qspi->bspi_mode = false;
1395 }
1396
1397 dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");
1398
1399 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
1400 if (res) {
1401 qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
1402 if (IS_ERR(qspi->base[CHIP_SELECT])) {
1403 ret = PTR_ERR(qspi->base[CHIP_SELECT]);
1404 goto qspi_resource_err;
1405 }
1406 }
1407
1408 qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
1409 GFP_KERNEL);
1410 if (!qspi->dev_ids) {
1411 ret = -ENOMEM;
1412 goto qspi_resource_err;
1413 }
1414
1415 for (val = 0; val < num_irqs; val++) {
1416 irq = -1;
1417 name = qspi_irq_tab[val].irq_name;
1418 if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
1419 /* get the l2 interrupts */
1420 irq = platform_get_irq_byname_optional(pdev, name);
1421 } else if (!num_ints && soc_intc) {
1422 /* all mspi, bspi intrs muxed to one L1 intr */
1423 irq = platform_get_irq(pdev, 0);
1424 }
1425
1426 if (irq >= 0) {
1427 ret = devm_request_irq(&pdev->dev, irq,
1428 qspi_irq_tab[val].irq_handler, 0,
1429 name,
1430 &qspi->dev_ids[val]);
1431 if (ret < 0) {
1432 dev_err(&pdev->dev, "IRQ %s not found\n", name);
1433 goto qspi_probe_err;
1434 }
1435
1436 qspi->dev_ids[val].dev = qspi;
1437 qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
1438 num_ints++;
1439 dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
1440 qspi_irq_tab[val].irq_name,
1441 irq);
1442 }
1443 }
1444
1445 if (!num_ints) {
1446 dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
1447 ret = -EINVAL;
1448 goto qspi_probe_err;
1449 }
1450
1451 /*
1452 * Some SoCs integrate spi controller (e.g., its interrupt bits)
1453 * in specific ways
1454 */
1455 if (soc_intc) {
1456 qspi->soc_intc = soc_intc;
1457 soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
1458 } else {
1459 qspi->soc_intc = NULL;
1460 }
1461
1462 ret = clk_prepare_enable(qspi->clk);
1463 if (ret) {
1464 dev_err(dev, "failed to prepare clock\n");
1465 goto qspi_probe_err;
1466 }
1467
1468 qspi->base_clk = clk_get_rate(qspi->clk);
1469
1470 if (data->has_mspi_rev) {
1471 rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
1472 /* some older revs do not have a MSPI_REV register */
1473 if ((rev & 0xff) == 0xff)
1474 rev = 0;
1475 }
1476
1477 qspi->mspi_maj_rev = (rev >> 4) & 0xf;
1478 qspi->mspi_min_rev = rev & 0xf;
1479 qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
1480
1481 qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
1482
1483 bcm_qspi_hw_init(qspi);
1484 init_completion(&qspi->mspi_done);
1485 init_completion(&qspi->bspi_done);
1486 qspi->curr_cs = -1;
1487
1488 platform_set_drvdata(pdev, qspi);
1489
1490 qspi->xfer_mode.width = -1;
1491 qspi->xfer_mode.addrlen = -1;
1492 qspi->xfer_mode.hp = -1;
1493
1494 ret = devm_spi_register_master(&pdev->dev, master);
1495 if (ret < 0) {
1496 dev_err(dev, "can't register master\n");
1497 goto qspi_reg_err;
1498 }
1499
1500 return 0;
1501
1502qspi_reg_err:
1503 bcm_qspi_hw_uninit(qspi);
1504 clk_disable_unprepare(qspi->clk);
1505qspi_probe_err:
1506 kfree(qspi->dev_ids);
1507qspi_resource_err:
1508 spi_master_put(master);
1509 return ret;
1510}
1511/* probe function to be called by SoC specific platform driver probe */
1512EXPORT_SYMBOL_GPL(bcm_qspi_probe);
1513
1514int bcm_qspi_remove(struct platform_device *pdev)
1515{
1516 struct bcm_qspi *qspi = platform_get_drvdata(pdev);
1517
1518 bcm_qspi_hw_uninit(qspi);
1519 clk_disable_unprepare(qspi->clk);
1520 kfree(qspi->dev_ids);
1521 spi_unregister_master(qspi->master);
1522
1523 return 0;
1524}
1525/* function to be called by SoC specific platform driver remove() */
1526EXPORT_SYMBOL_GPL(bcm_qspi_remove);
1527
/*
 * System suspend: save the BSPI strap-override state (restored by
 * bcm_qspi_bspi_set_mode() after resume), quiesce the SPI core, gate
 * the clock and park the hardware.
 */
static int __maybe_unused bcm_qspi_suspend(struct device *dev)
{
	struct bcm_qspi *qspi = dev_get_drvdata(dev);

	/* store the override strap value */
	if (!bcm_qspi_bspi_ver_three(qspi))
		qspi->s3_strap_override_ctrl =
			bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);

	spi_master_suspend(qspi->master);
	clk_disable_unprepare(qspi->clk);
	/*
	 * NOTE(review): hw_uninit() writes MSPI registers after the clock
	 * was disabled above -- confirm register access still works with
	 * the gated clock on all supported SoCs.
	 */
	bcm_qspi_hw_uninit(qspi);

	return 0;
};
1543
/*
 * System resume: re-initialize the hardware, restore the chip select,
 * re-enable the SoC-level MSPI interrupt, then ungate the clock and
 * resume the SPI core.
 */
static int __maybe_unused bcm_qspi_resume(struct device *dev)
{
	struct bcm_qspi *qspi = dev_get_drvdata(dev);
	int ret = 0;

	/*
	 * NOTE(review): hw_init() touches registers before
	 * clk_prepare_enable() below -- mirror image of the suspend path;
	 * confirm this ordering is intentional for these SoCs.
	 */
	bcm_qspi_hw_init(qspi);
	bcm_qspi_chip_select(qspi, qspi->curr_cs);
	if (qspi->soc_intc)
		/* enable MSPI interrupt */
		qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
						 true);

	ret = clk_prepare_enable(qspi->clk);
	if (!ret)
		spi_master_resume(qspi->master);

	return ret;
}
1562
/* suspend/resume hooks packaged for the SoC-specific platform drivers */
SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);

/* pm_ops to be called by SoC specific platform driver */
EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);

MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("Broadcom QSPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);