// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ASPEED FMC/SPI Memory Controller Driver
 *
 * Copyright (c) 2015-2022, IBM Corporation.
 * Copyright (c) 2020, ASPEED Corporation.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#define DEVICE_NAME "spi-aspeed-smc"

/* Type setting Register */
#define CONFIG_REG		0x0
#define CONFIG_TYPE_SPI		0x2

/* CE Control Register */
#define CE_CTRL_REG		0x4

/* CEx Control Register */
#define CE0_CTRL_REG		0x10
#define CTRL_IO_MODE_MASK	GENMASK(30, 28)
#define CTRL_IO_SINGLE_DATA	0x0
#define CTRL_IO_DUAL_DATA	BIT(29)
#define CTRL_IO_QUAD_DATA	BIT(30)
#define CTRL_COMMAND_SHIFT	16
#define CTRL_IO_ADDRESS_4B	BIT(13)	/* AST2400 SPI only */
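/*
 * CTRL_IO_DUMMY_SET() packs the dummy count passed by the callers
 * below (in bytes) into the control register: bit 2 of the count goes
 * to register bit 14 and bits [1:0] go to register bits [7:6].
 */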
#define CTRL_IO_DUMMY_SET(dummy) \
	(((((dummy) >> 2) & 0x1) << 14) | (((dummy) & 0x3) << 6))
#define CTRL_FREQ_SEL_SHIFT	8
#define CTRL_FREQ_SEL_MASK	GENMASK(11, CTRL_FREQ_SEL_SHIFT)
#define CTRL_CE_STOP_ACTIVE	BIT(2)
#define CTRL_IO_MODE_CMD_MASK	GENMASK(1, 0)
#define CTRL_IO_MODE_NORMAL	0x0
#define CTRL_IO_MODE_READ	0x1
#define CTRL_IO_MODE_WRITE	0x2
#define CTRL_IO_MODE_USER	0x3

#define CTRL_IO_CMD_MASK	0xf0ff40c3

/* CEx Address Decoding Range Register */
#define CE0_SEGMENT_ADDR_REG	0x30

/* CEx Read timing compensation register */
#define CE0_TIMING_COMPENSATION_REG	0x94

enum aspeed_spi_ctl_reg_value {
	ASPEED_SPI_BASE,
	ASPEED_SPI_READ,
	ASPEED_SPI_WRITE,
	ASPEED_SPI_MAX,
};

struct aspeed_spi;

struct aspeed_spi_chip {
	struct aspeed_spi *aspi;
	u32 cs;
	void __iomem *ctl;
	void __iomem *ahb_base;
	u32 ahb_window_size;
	u32 ctl_val[ASPEED_SPI_MAX];
	u32 clk_freq;
};

struct aspeed_spi_data {
	u32 ctl0;
	u32 max_cs;
	bool hastype;
	u32 mode_bits;
	u32 we0;
	u32 timing;
	u32 hclk_mask;
	u32 hdiv_max;

	u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg);
	u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg);
	u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end);
	int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv,
			 const u8 *golden_buf, u8 *test_buf);
};

#define ASPEED_SPI_MAX_NUM_CS	5

struct aspeed_spi {
	const struct aspeed_spi_data *data;

	void __iomem *regs;
	void __iomem *ahb_base;
	u32 ahb_base_phy;
	u32 ahb_window_size;
	struct device *dev;

	struct clk *clk;
	u32 clk_freq;

	struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS];
};

static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op)
{
	switch (op->data.buswidth) {
	case 1:
		return CTRL_IO_SINGLE_DATA;
	case 2:
		return CTRL_IO_DUAL_DATA;
	case 4:
		return CTRL_IO_QUAD_DATA;
	default:
		return CTRL_IO_SINGLE_DATA;
	}
}

static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode)
{
	u32 ctl;

	if (io_mode > 0) {
		ctl = readl(chip->ctl) & ~CTRL_IO_MODE_MASK;
		ctl |= io_mode;
		writel(ctl, chip->ctl);
	}
}

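/*
 * In USER command mode the transfer is driven by hand through the AHB
 * window: aspeed_spi_start_user() selects USER mode and toggles
 * CTRL_CE_STOP_ACTIVE to start the transfer, aspeed_spi_stop_user()
 * sets it again to end the transfer and restores the default READ
 * control value.
 */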
static void aspeed_spi_start_user(struct aspeed_spi_chip *chip)
{
	u32 ctl = chip->ctl_val[ASPEED_SPI_BASE];

	ctl |= CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
	writel(ctl, chip->ctl);

	ctl &= ~CTRL_CE_STOP_ACTIVE;
	writel(ctl, chip->ctl);
}

static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip)
{
	u32 ctl = chip->ctl_val[ASPEED_SPI_READ] |
		CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;

	writel(ctl, chip->ctl);

	/* Restore defaults */
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
}

static int aspeed_spi_read_from_ahb(void *buf, void __iomem *src, size_t len)
{
	size_t offset = 0;

	if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
	    IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
		ioread32_rep(src, buf, len >> 2);
		offset = len & ~0x3;
		len -= offset;
	}
	ioread8_rep(src, (u8 *)buf + offset, len);
	return 0;
}

static int aspeed_spi_write_to_ahb(void __iomem *dst, const void *buf, size_t len)
{
	size_t offset = 0;

	if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
	    IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
		iowrite32_rep(dst, buf, len >> 2);
		offset = len & ~0x3;
		len -= offset;
	}
	iowrite8_rep(dst, (const u8 *)buf + offset, len);
	return 0;
}

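/*
 * The command and address are shifted out by writing them through the
 * AHB window while in USER mode. For 3-byte addressing, the opcode and
 * address are packed into a single big-endian word so they go out as
 * one 4-byte write.
 */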
static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes,
				    u64 offset, u32 opcode)
{
	__be32 temp;
	u32 cmdaddr;

	switch (addr_nbytes) {
	case 3:
		cmdaddr = offset & 0xFFFFFF;
		cmdaddr |= opcode << 24;

		temp = cpu_to_be32(cmdaddr);
		aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
		break;
	case 4:
		temp = cpu_to_be32(offset);
		aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1);
		aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
		break;
	default:
		WARN_ONCE(1, "Unexpected address width %u", addr_nbytes);
		return -EOPNOTSUPP;
	}
	return 0;
}

static int aspeed_spi_read_reg(struct aspeed_spi_chip *chip,
			       const struct spi_mem_op *op)
{
	aspeed_spi_start_user(chip);
	aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
	aspeed_spi_read_from_ahb(op->data.buf.in,
				 chip->ahb_base, op->data.nbytes);
	aspeed_spi_stop_user(chip);
	return 0;
}

static int aspeed_spi_write_reg(struct aspeed_spi_chip *chip,
				const struct spi_mem_op *op)
{
	aspeed_spi_start_user(chip);
	aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
	aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out,
				op->data.nbytes);
	aspeed_spi_stop_user(chip);
	return 0;
}

static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
				    const struct spi_mem_op *op,
				    u64 offset, size_t len, void *buf)
{
	int io_mode = aspeed_spi_get_io_mode(op);
	u8 dummy = 0xFF;
	int i;
	int ret;

	aspeed_spi_start_user(chip);

	ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode);
	if (ret < 0)
		goto stop_user;

	if (op->dummy.buswidth && op->dummy.nbytes) {
		for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
			aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
	}

	aspeed_spi_set_io_mode(chip, io_mode);

	aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
stop_user:
	aspeed_spi_stop_user(chip);
	return ret;
}

static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
				     const struct spi_mem_op *op)
{
	int ret;

	aspeed_spi_start_user(chip);
	ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode);
	if (ret < 0)
		goto stop_user;
	aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
stop_user:
	aspeed_spi_stop_user(chip);
	return ret;
}

269
270/* support for 1-1-1, 1-1-2 or 1-1-4 */
271static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
272{
273 if (op->cmd.buswidth > 1)
274 return false;
275
276 if (op->addr.nbytes != 0) {
277 if (op->addr.buswidth > 1)
278 return false;
279 if (op->addr.nbytes < 3 || op->addr.nbytes > 4)
280 return false;
281 }
282
283 if (op->dummy.nbytes != 0) {
284 if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7)
285 return false;
286 }
287
288 if (op->data.nbytes != 0 && op->data.buswidth > 4)
289 return false;
290
291 return spi_mem_default_supports_op(mem, op);
292}
293
294static const struct aspeed_spi_data ast2400_spi_data;
295
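/*
 * Program the CEx control register for the operation (opcode, dummy
 * cycles, IO mode, direction), then run it either as a plain register
 * access when there is no address, or in USER mode otherwise. The
 * 4-byte address mode bits and the default READ control value are
 * restored afterwards.
 */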
static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master);
	struct aspeed_spi_chip *chip = &aspi->chips[mem->spi->chip_select];
	u32 addr_mode, addr_mode_backup;
	u32 ctl_val;
	int ret = 0;

	dev_dbg(aspi->dev,
		"CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
		chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);

	addr_mode = readl(aspi->regs + CE_CTRL_REG);
	addr_mode_backup = addr_mode;

	ctl_val = chip->ctl_val[ASPEED_SPI_BASE];
	ctl_val &= ~CTRL_IO_CMD_MASK;

	ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT;

	/* 4BYTE address mode */
	if (op->addr.nbytes) {
		if (op->addr.nbytes == 4)
			addr_mode |= (0x11 << chip->cs);
		else
			addr_mode &= ~(0x11 << chip->cs);

		if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
			ctl_val |= CTRL_IO_ADDRESS_4B;
	}

	if (op->dummy.nbytes)
		ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);

	if (op->data.nbytes)
		ctl_val |= aspeed_spi_get_io_mode(op);

	if (op->data.dir == SPI_MEM_DATA_OUT)
		ctl_val |= CTRL_IO_MODE_WRITE;
	else
		ctl_val |= CTRL_IO_MODE_READ;

	if (addr_mode != addr_mode_backup)
		writel(addr_mode, aspi->regs + CE_CTRL_REG);
	writel(ctl_val, chip->ctl);

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!op->addr.nbytes)
			ret = aspeed_spi_read_reg(chip, op);
		else
			ret = aspeed_spi_read_user(chip, op, op->addr.val,
						   op->data.nbytes, op->data.buf.in);
	} else {
		if (!op->addr.nbytes)
			ret = aspeed_spi_write_reg(chip, op);
		else
			ret = aspeed_spi_write_user(chip, op);
	}

	/* Restore defaults */
	if (addr_mode != addr_mode_backup)
		writel(addr_mode_backup, aspi->regs + CE_CTRL_REG);
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
	return ret;
}

static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;

	ret = do_aspeed_spi_exec_op(mem, op);
	if (ret)
		dev_err(&mem->spi->dev, "operation failed: %d\n", ret);
	return ret;
}

static const char *aspeed_spi_get_name(struct spi_mem *mem)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master);
	struct device *dev = aspi->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select);
}

struct aspeed_spi_window {
	u32 cs;
	u32 offset;
	u32 size;
};

static void aspeed_spi_get_windows(struct aspeed_spi *aspi,
				   struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS])
{
	const struct aspeed_spi_data *data = aspi->data;
	u32 reg_val;
	u32 cs;

	for (cs = 0; cs < aspi->data->max_cs; cs++) {
		reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4);
		windows[cs].cs = cs;
		windows[cs].size = data->segment_end(aspi, reg_val) -
			data->segment_start(aspi, reg_val);
		windows[cs].offset = data->segment_start(aspi, reg_val) - aspi->ahb_base_phy;
		dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs,
			 windows[cs].offset, windows[cs].size);
	}
}

/*
 * On the AST2600, some CE windows are closed by default at reset but
 * U-Boot should have opened them all.
 */
static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip)
{
	struct aspeed_spi *aspi = chip->aspi;
	struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
	struct aspeed_spi_window *win = &windows[chip->cs];

	/* No segment registers for the AST2400 SPI controller */
	if (aspi->data == &ast2400_spi_data) {
		win->offset = 0;
		win->size = aspi->ahb_window_size;
	} else {
		aspeed_spi_get_windows(aspi, windows);
	}

	chip->ahb_base = aspi->ahb_base + win->offset;
	chip->ahb_window_size = win->size;

	dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB",
		chip->cs, aspi->ahb_base_phy + win->offset,
		aspi->ahb_base_phy + win->offset + win->size - 1,
		win->size >> 20);

	return chip->ahb_window_size ? 0 : -1;
}

static int aspeed_spi_set_window(struct aspeed_spi *aspi,
				 const struct aspeed_spi_window *win)
{
	u32 start = aspi->ahb_base_phy + win->offset;
	u32 end = start + win->size;
	void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4;
	u32 seg_val_backup = readl(seg_reg);
	u32 seg_val = aspi->data->segment_reg(aspi, start, end);

	if (seg_val == seg_val_backup)
		return 0;

	writel(seg_val, seg_reg);

	/*
	 * Restore the initial value if something goes wrong, else we
	 * could lose access to the chip.
	 */
	if (seg_val != readl(seg_reg)) {
		dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB",
			win->cs, start, end - 1, win->size >> 20);
		writel(seg_val_backup, seg_reg);
		return -EIO;
	}

	if (win->size)
		dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB",
			win->cs, start, end - 1, win->size >> 20);
	else
		dev_dbg(aspi->dev, "CE%d window closed", win->cs);

	return 0;
}

/*
 * Yet to be done when possible:
 * - Align mappings on flash size (we don't have the info)
 * - ioremap each window, not strictly necessary since the overall window
 *   is correct.
 */
static const struct aspeed_spi_data ast2500_spi_data;
static const struct aspeed_spi_data ast2600_spi_data;
static const struct aspeed_spi_data ast2600_fmc_data;

static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip,
					 u32 local_offset, u32 size)
{
	struct aspeed_spi *aspi = chip->aspi;
	struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
	struct aspeed_spi_window *win = &windows[chip->cs];
	int ret;

	/* No segment registers for the AST2400 SPI controller */
	if (aspi->data == &ast2400_spi_data)
		return 0;

	/*
	 * Due to an HW issue on the AST2500 SPI controller, the CE0
	 * window size should be smaller than the maximum 128MB.
	 */
	if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) {
		size = 120 << 20;
		dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)",
			 chip->cs, size >> 20);
	}

	/*
	 * The decoding size of the AST2600 SPI controller should be set
	 * to at least 2MB.
	 */
	if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) &&
	    size < SZ_2M) {
		size = SZ_2M;
		dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)",
			 chip->cs, size >> 20);
	}

	aspeed_spi_get_windows(aspi, windows);

	/* Adjust this chip window */
	win->offset += local_offset;
	win->size = size;

	if (win->offset + win->size > aspi->ahb_window_size) {
		win->size = aspi->ahb_window_size - win->offset;
		dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20);
	}

	ret = aspeed_spi_set_window(aspi, win);
	if (ret)
		return ret;

	/* Update chip mapping info */
	chip->ahb_base = aspi->ahb_base + win->offset;
	chip->ahb_window_size = win->size;

	/*
	 * Also adjust the next chip window to make sure that it does not
	 * overlap with the current window.
	 */
	if (chip->cs < aspi->data->max_cs - 1) {
		struct aspeed_spi_window *next = &windows[chip->cs + 1];

		/* Change offset and size to keep the same end address */
		if ((next->offset + next->size) > (win->offset + win->size))
			next->size = (next->offset + next->size) - (win->offset + win->size);
		else
			next->size = 0;
		next->offset = win->offset + win->size;

		aspeed_spi_set_window(aspi, next);
	}
	return 0;
}

static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip);

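/*
 * Direct mappings are only supported for reads. The CE decoding window
 * is adjusted to the requested mapping, the READ control value is
 * derived from the dirmap template and the read timing calibration is
 * (re)run with the new settings.
 */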
static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master);
	struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select];
	struct spi_mem_op *op = &desc->info.op_tmpl;
	u32 ctl_val;
	int ret = 0;

	dev_dbg(aspi->dev,
		"CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n",
		chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
		desc->info.offset, desc->info.offset + desc->info.length,
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.nbytes, op->dummy.nbytes);

	chip->clk_freq = desc->mem->spi->max_speed_hz;

	/* Only for reads */
	if (op->data.dir != SPI_MEM_DATA_IN)
		return -EOPNOTSUPP;

	aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length);

	if (desc->info.length > chip->ahb_window_size)
		dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping",
			 chip->cs, chip->ahb_window_size >> 20);

	/* Define the default IO read settings */
	ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
	ctl_val |= aspeed_spi_get_io_mode(op) |
		op->cmd.opcode << CTRL_COMMAND_SHIFT |
		CTRL_IO_MODE_READ;

	if (op->dummy.nbytes)
		ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);

	/* Tune 4BYTE address mode */
	if (op->addr.nbytes) {
		u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);

		if (op->addr.nbytes == 4)
			addr_mode |= (0x11 << chip->cs);
		else
			addr_mode &= ~(0x11 << chip->cs);
		writel(addr_mode, aspi->regs + CE_CTRL_REG);

		/*
		 * AST2400 SPI controller sets 4BYTE address mode in
		 * CE0 Control Register
		 */
		if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
			ctl_val |= CTRL_IO_ADDRESS_4B;
	}

	/* READ mode is the controller default setting */
	chip->ctl_val[ASPEED_SPI_READ] = ctl_val;
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);

	ret = aspeed_spi_do_calibration(chip);

	dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n",
		 chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]);

	return ret;
}

static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offset, size_t len, void *buf)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master);
	struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select];

	/* Switch to USER command mode if mapping window is too small */
	if (chip->ahb_window_size < offset + len) {
		int ret;

		ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf);
		if (ret < 0)
			return ret;
	} else {
		memcpy_fromio(buf, chip->ahb_base + offset, len);
	}

	return len;
}

static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
	.supports_op = aspeed_spi_supports_op,
	.exec_op = aspeed_spi_exec_op,
	.get_name = aspeed_spi_get_name,
	.dirmap_create = aspeed_spi_dirmap_create,
	.dirmap_read = aspeed_spi_dirmap_read,
};

static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type)
{
	u32 reg;

	reg = readl(aspi->regs + CONFIG_REG);
	reg &= ~(0x3 << (cs * 2));
	reg |= type << (cs * 2);
	writel(reg, aspi->regs + CONFIG_REG);
}

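/* Each CE has a write enable bit in the CONFIG register, starting at we0 */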
static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, bool enable)
{
	u32 we_bit = BIT(aspi->data->we0 + cs);
	u32 reg = readl(aspi->regs + CONFIG_REG);

	if (enable)
		reg |= we_bit;
	else
		reg &= ~we_bit;
	writel(reg, aspi->regs + CONFIG_REG);
}

static int aspeed_spi_setup(struct spi_device *spi)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master);
	const struct aspeed_spi_data *data = aspi->data;
	unsigned int cs = spi->chip_select;
	struct aspeed_spi_chip *chip = &aspi->chips[cs];

	chip->aspi = aspi;
	chip->cs = cs;
	chip->ctl = aspi->regs + data->ctl0 + cs * 4;

	/* The driver only supports SPI type flash */
	if (data->hastype)
		aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI);

	if (aspeed_spi_chip_set_default_window(chip) < 0) {
		dev_warn(aspi->dev, "CE%d window invalid", cs);
		return -EINVAL;
	}

	aspeed_spi_chip_enable(aspi, cs, true);

	chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER;

	dev_dbg(aspi->dev, "CE%d setup done\n", cs);
	return 0;
}

static void aspeed_spi_cleanup(struct spi_device *spi)
{
	struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master);
	unsigned int cs = spi->chip_select;

	aspeed_spi_chip_enable(aspi, cs, false);

	dev_dbg(aspi->dev, "CE%d cleanup done\n", cs);
}

static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable)
{
	int cs;

	for (cs = 0; cs < aspi->data->max_cs; cs++)
		aspeed_spi_chip_enable(aspi, cs, enable);
}

static int aspeed_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct aspeed_spi_data *data;
	struct spi_controller *ctlr;
	struct aspeed_spi *aspi;
	struct resource *res;
	int ret;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	ctlr = devm_spi_alloc_master(dev, sizeof(*aspi));
	if (!ctlr)
		return -ENOMEM;

	aspi = spi_controller_get_devdata(ctlr);
	platform_set_drvdata(pdev, aspi);
	aspi->data = data;
	aspi->dev = dev;

	aspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(aspi->regs))
		return PTR_ERR(aspi->regs);

	aspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
	if (IS_ERR(aspi->ahb_base)) {
		dev_err(dev, "missing AHB mapping window\n");
		return PTR_ERR(aspi->ahb_base);
	}

	aspi->ahb_window_size = resource_size(res);
	aspi->ahb_base_phy = res->start;

	aspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(aspi->clk)) {
		dev_err(dev, "missing clock\n");
		return PTR_ERR(aspi->clk);
	}

	aspi->clk_freq = clk_get_rate(aspi->clk);
	if (!aspi->clk_freq) {
		dev_err(dev, "invalid clock\n");
		return -EINVAL;
	}

	ret = clk_prepare_enable(aspi->clk);
	if (ret) {
		dev_err(dev, "cannot enable the clock\n");
		return ret;
	}

	/* IRQ is for DMA, which the driver doesn't support yet */

	ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits;
	ctlr->bus_num = pdev->id;
	ctlr->mem_ops = &aspeed_spi_mem_ops;
	ctlr->setup = aspeed_spi_setup;
	ctlr->cleanup = aspeed_spi_cleanup;
	ctlr->num_chipselect = data->max_cs;
	ctlr->dev.of_node = dev->of_node;

	ret = devm_spi_register_controller(dev, ctlr);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_controller failed\n");
		goto disable_clk;
	}
	return 0;

disable_clk:
	clk_disable_unprepare(aspi->clk);
	return ret;
}

static int aspeed_spi_remove(struct platform_device *pdev)
{
	struct aspeed_spi *aspi = platform_get_drvdata(pdev);

	aspeed_spi_enable(aspi, false);
	clk_disable_unprepare(aspi->clk);
	return 0;
}

/*
 * AHB mappings
 */

/*
 * The Segment Registers of the AST2400 and AST2500 use an 8MB unit.
 * The address range is encoded with absolute addresses in the overall
 * mapping window.
 */
static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg)
{
	return ((reg >> 16) & 0xFF) << 23;
}

static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg)
{
	return ((reg >> 24) & 0xFF) << 23;
}

static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end)
{
	return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24);
}

/*
 * The Segment Registers of the AST2600 use a 1MB unit. The address
 * range is encoded with offsets in the overall mapping window.
 */

#define AST2600_SEG_ADDR_MASK 0x0ff00000

static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi,
					    u32 reg)
{
	u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;

	return aspi->ahb_base_phy + start_offset;
}

static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi,
					  u32 reg)
{
	u32 end_offset = reg & AST2600_SEG_ADDR_MASK;

	/* segment is disabled */
	if (!end_offset)
		return aspi->ahb_base_phy;

	return aspi->ahb_base_phy + end_offset + 0x100000;
}

static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi,
					  u32 start, u32 end)
{
	/* disable zero size segments */
	if (start == end)
		return 0;

	return ((start & AST2600_SEG_ADDR_MASK) >> 16) |
		((end - 1) & AST2600_SEG_ADDR_MASK);
}

/*
 * Read timing compensation sequences
 */

#define CALIBRATE_BUF_SIZE SZ_16K

static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip,
				   const u8 *golden_buf, u8 *test_buf)
{
	int i;

	for (i = 0; i < 10; i++) {
		memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
		if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) {
#if defined(VERBOSE_DEBUG)
			print_hex_dump_bytes(DEVICE_NAME " fail: ", DUMP_PREFIX_NONE,
					     test_buf, 0x100);
#endif
			return false;
		}
	}
	return true;
}

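/*
 * FREAD_TPASS() builds one nibble of the AST2400/AST2500 read timing
 * register: bits [2:0] hold the HCLK cycle delay (i / 2) and bit 3 adds
 * the extra data input delay (set when i is even), matching the values
 * reported by the debug trace below.
 */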
#define FREAD_TPASS(i)	(((i) / 2) | (((i) & 1) ? 0 : 8))

/*
 * The timing register is shared by all devices. Only update for CE0.
 */
static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
				const u8 *golden_buf, u8 *test_buf)
{
	struct aspeed_spi *aspi = chip->aspi;
	const struct aspeed_spi_data *data = aspi->data;
	int i;
	int good_pass = -1, pass_count = 0;
	u32 shift = (hdiv - 1) << 2;
	u32 mask = ~(0xfu << shift);
	u32 fread_timing_val = 0;

	/*
	 * Try HCLK delay 0..5, each one with/without delay and look for a
	 * good pair.
	 */
	for (i = 0; i < 12; i++) {
		bool pass;

		if (chip->cs == 0) {
			fread_timing_val &= mask;
			fread_timing_val |= FREAD_TPASS(i) << shift;
			writel(fread_timing_val, aspi->regs + data->timing);
		}
		pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
		dev_dbg(aspi->dev,
			" * [%08x] %d HCLK delay, %dns DI delay : %s",
			fread_timing_val, i / 2, (i & 1) ? 0 : 4,
			pass ? "PASS" : "FAIL");
		if (pass) {
			pass_count++;
			if (pass_count == 3) {
				good_pass = i - 1;
				break;
			}
		} else {
			pass_count = 0;
		}
	}

	/* No good setting for this frequency */
	if (good_pass < 0)
		return -1;

	/* We have at least one pass of margin, let's use first pass */
	if (chip->cs == 0) {
		fread_timing_val &= mask;
		fread_timing_val |= FREAD_TPASS(good_pass) << shift;
		writel(fread_timing_val, aspi->regs + data->timing);
	}
	dev_dbg(aspi->dev, " * -> good is pass %d [0x%08x]",
		good_pass, fread_timing_val);
	return 0;
}

static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size)
{
	const u32 *tb32 = (const u32 *)test_buf;
	u32 i, cnt = 0;

	/*
	 * We check if we have enough words that are neither all 0
	 * nor all 1's so the calibration can be considered valid.
	 *
	 * I use an arbitrary threshold for now of 64.
	 */
	size >>= 2;
	for (i = 0; i < size; i++) {
		if (tb32[i] != 0 && tb32[i] != 0xffffffff)
			cnt++;
	}
	return cnt >= 64;
}

static const u32 aspeed_spi_hclk_divs[] = {
	0xf, /* HCLK */
	0x7, /* HCLK/2 */
	0xe, /* HCLK/3 */
	0x6, /* HCLK/4 */
	0xd, /* HCLK/5 */
};

#define ASPEED_SPI_HCLK_DIV(i) \
	(aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT)

static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
{
	struct aspeed_spi *aspi = chip->aspi;
	const struct aspeed_spi_data *data = aspi->data;
	u32 ahb_freq = aspi->clk_freq;
	u32 max_freq = chip->clk_freq;
	u32 ctl_val;
	u8 *golden_buf = NULL;
	u8 *test_buf = NULL;
	int i, rc, best_div = -1;

	dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz",
		ahb_freq / 1000000);

	/*
	 * Use the lowest frequency to check the calibration data and
	 * to capture the golden data.
	 */
	ctl_val = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask;
	writel(ctl_val, chip->ctl);

	test_buf = kzalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL);
	if (!test_buf)
		return -ENOMEM;

	golden_buf = test_buf + CALIBRATE_BUF_SIZE;

	memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
	if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) {
		dev_info(aspi->dev, "Calibration area too uniform, using low speed");
		goto no_calib;
	}

#if defined(VERBOSE_DEBUG)
	print_hex_dump_bytes(DEVICE_NAME " good: ", DUMP_PREFIX_NONE,
			     golden_buf, 0x100);
#endif

	/* Now we iterate the HCLK dividers until we find our breaking point */
	for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) {
		u32 tv, freq;

		freq = ahb_freq / i;
		if (freq > max_freq)
			continue;

		/* Set the timing */
		tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i);
		writel(tv, chip->ctl);
		dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv);
		rc = data->calibrate(chip, i, golden_buf, test_buf);
		if (rc == 0)
			best_div = i;
	}

	/* Nothing found ? */
	if (best_div < 0) {
		dev_warn(aspi->dev, "No good frequency, using dumb slow");
	} else {
		dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div);

		/* Record the freq */
		for (i = 0; i < ASPEED_SPI_MAX; i++)
			chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) |
				ASPEED_SPI_HCLK_DIV(best_div);
	}

no_calib:
	writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
	kfree(test_buf);
	return 0;
}

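/*
 * On the AST2600, each CE has its own read timing register (see
 * TIMING_REG_AST2600), unlike the AST2400/AST2500 where the register is
 * shared. Each HCLK divider uses one byte of it: bits [2:0] hold the
 * HCLK cycle delay, bit 3 (TIMING_DELAY_DI) enables the data input
 * delay and bits [7:4] set that delay in 0.5ns steps.
 */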
#define TIMING_DELAY_DI		BIT(3)
#define TIMING_DELAY_HCYCLE_MAX	5
#define TIMING_REG_AST2600(chip) \
	((chip)->aspi->regs + (chip)->aspi->data->timing + \
	 (chip)->cs * 4)

static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
					const u8 *golden_buf, u8 *test_buf)
{
	struct aspeed_spi *aspi = chip->aspi;
	int hcycle;
	u32 shift = (hdiv - 2) << 3;
	u32 mask = ~(0xfu << shift);
	u32 fread_timing_val = 0;

	for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
		int delay_ns;
		bool pass = false;

		fread_timing_val &= mask;
		fread_timing_val |= hcycle << shift;

		/* no DI input delay first */
		writel(fread_timing_val, TIMING_REG_AST2600(chip));
		pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
		dev_dbg(aspi->dev,
			" * [%08x] %d HCLK delay, DI delay none : %s",
			fread_timing_val, hcycle, pass ? "PASS" : "FAIL");
		if (pass)
			return 0;

		/* Add DI input delays */
		fread_timing_val &= mask;
		fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift;

		for (delay_ns = 0; delay_ns < 0x10; delay_ns++) {
			fread_timing_val &= ~(0xf << (4 + shift));
			fread_timing_val |= delay_ns << (4 + shift);

			writel(fread_timing_val, TIMING_REG_AST2600(chip));
			pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
			dev_dbg(aspi->dev,
				" * [%08x] %d HCLK delay, DI delay %d.%dns : %s",
				fread_timing_val, hcycle, (delay_ns + 1) / 2,
				(delay_ns + 1) & 1 ? 5 : 0, pass ? "PASS" : "FAIL");
			/*
			 * TODO: This is optimistic. We should look
			 * for a working interval and save the middle
			 * value in the read timing register.
			 */
			if (pass)
				return 0;
		}
	}

	/* No good setting for this frequency */
	return -1;
}

/*
 * Platform definitions
 */
static const struct aspeed_spi_data ast2400_fmc_data = {
	.max_cs = 5,
	.hastype = true,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xfffff0ff,
	.hdiv_max = 1,
	.calibrate = aspeed_spi_calibrate,
	.segment_start = aspeed_spi_segment_start,
	.segment_end = aspeed_spi_segment_end,
	.segment_reg = aspeed_spi_segment_reg,
};

static const struct aspeed_spi_data ast2400_spi_data = {
	.max_cs = 1,
	.hastype = false,
	.we0 = 0,
	.ctl0 = 0x04,
	.timing = 0x14,
	.hclk_mask = 0xfffff0ff,
	.hdiv_max = 1,
	.calibrate = aspeed_spi_calibrate,
	/* No segment registers */
};

static const struct aspeed_spi_data ast2500_fmc_data = {
	.max_cs = 3,
	.hastype = true,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xffffd0ff,
	.hdiv_max = 1,
	.calibrate = aspeed_spi_calibrate,
	.segment_start = aspeed_spi_segment_start,
	.segment_end = aspeed_spi_segment_end,
	.segment_reg = aspeed_spi_segment_reg,
};

static const struct aspeed_spi_data ast2500_spi_data = {
	.max_cs = 2,
	.hastype = false,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xffffd0ff,
	.hdiv_max = 1,
	.calibrate = aspeed_spi_calibrate,
	.segment_start = aspeed_spi_segment_start,
	.segment_end = aspeed_spi_segment_end,
	.segment_reg = aspeed_spi_segment_reg,
};

static const struct aspeed_spi_data ast2600_fmc_data = {
	.max_cs = 3,
	.hastype = false,
	.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xf0fff0ff,
	.hdiv_max = 2,
	.calibrate = aspeed_spi_ast2600_calibrate,
	.segment_start = aspeed_spi_segment_ast2600_start,
	.segment_end = aspeed_spi_segment_ast2600_end,
	.segment_reg = aspeed_spi_segment_ast2600_reg,
};

static const struct aspeed_spi_data ast2600_spi_data = {
	.max_cs = 2,
	.hastype = false,
	.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
	.we0 = 16,
	.ctl0 = CE0_CTRL_REG,
	.timing = CE0_TIMING_COMPENSATION_REG,
	.hclk_mask = 0xf0fff0ff,
	.hdiv_max = 2,
	.calibrate = aspeed_spi_ast2600_calibrate,
	.segment_start = aspeed_spi_segment_ast2600_start,
	.segment_end = aspeed_spi_segment_ast2600_end,
	.segment_reg = aspeed_spi_segment_ast2600_reg,
};

static const struct of_device_id aspeed_spi_matches[] = {
	{ .compatible = "aspeed,ast2400-fmc", .data = &ast2400_fmc_data },
	{ .compatible = "aspeed,ast2400-spi", .data = &ast2400_spi_data },
	{ .compatible = "aspeed,ast2500-fmc", .data = &ast2500_fmc_data },
	{ .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data },
	{ .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data },
	{ .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data },
	{ }
};
MODULE_DEVICE_TABLE(of, aspeed_spi_matches);

static struct platform_driver aspeed_spi_driver = {
	.probe = aspeed_spi_probe,
	.remove = aspeed_spi_remove,
	.driver = {
		.name = DEVICE_NAME,
		.of_match_table = aspeed_spi_matches,
	}
};

module_platform_driver(aspeed_spi_driver);

MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
MODULE_AUTHOR("Chin-Ting Kuo <chin-ting_kuo@aspeedtech.com>");
MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * ASPEED FMC/SPI Memory Controller Driver
4 *
5 * Copyright (c) 2015-2022, IBM Corporation.
6 * Copyright (c) 2020, ASPEED Corporation.
7 */
8
9#include <linux/clk.h>
10#include <linux/module.h>
11#include <linux/of.h>
12#include <linux/of_platform.h>
13#include <linux/platform_device.h>
14#include <linux/spi/spi.h>
15#include <linux/spi/spi-mem.h>
16
17#define DEVICE_NAME "spi-aspeed-smc"
18
19/* Type setting Register */
20#define CONFIG_REG 0x0
21#define CONFIG_TYPE_SPI 0x2
22
23/* CE Control Register */
24#define CE_CTRL_REG 0x4
25
26/* CEx Control Register */
27#define CE0_CTRL_REG 0x10
28#define CTRL_IO_MODE_MASK GENMASK(30, 28)
29#define CTRL_IO_SINGLE_DATA 0x0
30#define CTRL_IO_DUAL_DATA BIT(29)
31#define CTRL_IO_QUAD_DATA BIT(30)
32#define CTRL_COMMAND_SHIFT 16
33#define CTRL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI only */
34#define CTRL_IO_DUMMY_SET(dummy) \
35 (((((dummy) >> 2) & 0x1) << 14) | (((dummy) & 0x3) << 6))
36#define CTRL_FREQ_SEL_SHIFT 8
37#define CTRL_FREQ_SEL_MASK GENMASK(11, CTRL_FREQ_SEL_SHIFT)
38#define CTRL_CE_STOP_ACTIVE BIT(2)
39#define CTRL_IO_MODE_CMD_MASK GENMASK(1, 0)
40#define CTRL_IO_MODE_NORMAL 0x0
41#define CTRL_IO_MODE_READ 0x1
42#define CTRL_IO_MODE_WRITE 0x2
43#define CTRL_IO_MODE_USER 0x3
44
45#define CTRL_IO_CMD_MASK 0xf0ff40c3
46
47/* CEx Address Decoding Range Register */
48#define CE0_SEGMENT_ADDR_REG 0x30
49
50/* CEx Read timing compensation register */
51#define CE0_TIMING_COMPENSATION_REG 0x94
52
53enum aspeed_spi_ctl_reg_value {
54 ASPEED_SPI_BASE,
55 ASPEED_SPI_READ,
56 ASPEED_SPI_WRITE,
57 ASPEED_SPI_MAX,
58};
59
60struct aspeed_spi;
61
62struct aspeed_spi_chip {
63 struct aspeed_spi *aspi;
64 u32 cs;
65 void __iomem *ctl;
66 void __iomem *ahb_base;
67 u32 ahb_window_size;
68 u32 ctl_val[ASPEED_SPI_MAX];
69 u32 clk_freq;
70};
71
72struct aspeed_spi_data {
73 u32 ctl0;
74 u32 max_cs;
75 bool hastype;
76 u32 mode_bits;
77 u32 we0;
78 u32 timing;
79 u32 hclk_mask;
80 u32 hdiv_max;
81
82 u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg);
83 u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg);
84 u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end);
85 int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv,
86 const u8 *golden_buf, u8 *test_buf);
87};
88
89#define ASPEED_SPI_MAX_NUM_CS 5
90
91struct aspeed_spi {
92 const struct aspeed_spi_data *data;
93
94 void __iomem *regs;
95 void __iomem *ahb_base;
96 u32 ahb_base_phy;
97 u32 ahb_window_size;
98 struct device *dev;
99
100 struct clk *clk;
101 u32 clk_freq;
102
103 struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS];
104};
105
106static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op)
107{
108 switch (op->data.buswidth) {
109 case 1:
110 return CTRL_IO_SINGLE_DATA;
111 case 2:
112 return CTRL_IO_DUAL_DATA;
113 case 4:
114 return CTRL_IO_QUAD_DATA;
115 default:
116 return CTRL_IO_SINGLE_DATA;
117 }
118}
119
120static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode)
121{
122 u32 ctl;
123
124 if (io_mode > 0) {
125 ctl = readl(chip->ctl) & ~CTRL_IO_MODE_MASK;
126 ctl |= io_mode;
127 writel(ctl, chip->ctl);
128 }
129}
130
131static void aspeed_spi_start_user(struct aspeed_spi_chip *chip)
132{
133 u32 ctl = chip->ctl_val[ASPEED_SPI_BASE];
134
135 ctl |= CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
136 writel(ctl, chip->ctl);
137
138 ctl &= ~CTRL_CE_STOP_ACTIVE;
139 writel(ctl, chip->ctl);
140}
141
142static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip)
143{
144 u32 ctl = chip->ctl_val[ASPEED_SPI_READ] |
145 CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
146
147 writel(ctl, chip->ctl);
148
149 /* Restore defaults */
150 writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
151}
152
153static int aspeed_spi_read_from_ahb(void *buf, void __iomem *src, size_t len)
154{
155 size_t offset = 0;
156
157 if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
158 IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
159 ioread32_rep(src, buf, len >> 2);
160 offset = len & ~0x3;
161 len -= offset;
162 }
163 ioread8_rep(src, (u8 *)buf + offset, len);
164 return 0;
165}
166
167static int aspeed_spi_write_to_ahb(void __iomem *dst, const void *buf, size_t len)
168{
169 size_t offset = 0;
170
171 if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
172 IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
173 iowrite32_rep(dst, buf, len >> 2);
174 offset = len & ~0x3;
175 len -= offset;
176 }
177 iowrite8_rep(dst, (const u8 *)buf + offset, len);
178 return 0;
179}
180
181static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes,
182 u64 offset, u32 opcode)
183{
184 __be32 temp;
185 u32 cmdaddr;
186
187 switch (addr_nbytes) {
188 case 3:
189 cmdaddr = offset & 0xFFFFFF;
190 cmdaddr |= opcode << 24;
191
192 temp = cpu_to_be32(cmdaddr);
193 aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
194 break;
195 case 4:
196 temp = cpu_to_be32(offset);
197 aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1);
198 aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
199 break;
200 default:
201 WARN_ONCE(1, "Unexpected address width %u", addr_nbytes);
202 return -EOPNOTSUPP;
203 }
204 return 0;
205}
206
207static int aspeed_spi_read_reg(struct aspeed_spi_chip *chip,
208 const struct spi_mem_op *op)
209{
210 aspeed_spi_start_user(chip);
211 aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
212 aspeed_spi_read_from_ahb(op->data.buf.in,
213 chip->ahb_base, op->data.nbytes);
214 aspeed_spi_stop_user(chip);
215 return 0;
216}
217
218static int aspeed_spi_write_reg(struct aspeed_spi_chip *chip,
219 const struct spi_mem_op *op)
220{
221 aspeed_spi_start_user(chip);
222 aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
223 aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out,
224 op->data.nbytes);
225 aspeed_spi_stop_user(chip);
226 return 0;
227}
228
229static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
230 const struct spi_mem_op *op,
231 u64 offset, size_t len, void *buf)
232{
233 int io_mode = aspeed_spi_get_io_mode(op);
234 u8 dummy = 0xFF;
235 int i;
236 int ret;
237
238 aspeed_spi_start_user(chip);
239
240 ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode);
241 if (ret < 0)
242 goto stop_user;
243
244 if (op->dummy.buswidth && op->dummy.nbytes) {
245 for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
246 aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
247 }
248
249 aspeed_spi_set_io_mode(chip, io_mode);
250
251 aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
252stop_user:
253 aspeed_spi_stop_user(chip);
254 return ret;
255}
256
257static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
258 const struct spi_mem_op *op)
259{
260 int ret;
261
262 aspeed_spi_start_user(chip);
263 ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode);
264 if (ret < 0)
265 goto stop_user;
266 aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
267stop_user:
268 aspeed_spi_stop_user(chip);
269 return ret;
270}
271
272/* support for 1-1-1, 1-1-2 or 1-1-4 */
273static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
274{
275 if (op->cmd.buswidth > 1)
276 return false;
277
278 if (op->addr.nbytes != 0) {
279 if (op->addr.buswidth > 1)
280 return false;
281 if (op->addr.nbytes < 3 || op->addr.nbytes > 4)
282 return false;
283 }
284
285 if (op->dummy.nbytes != 0) {
286 if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7)
287 return false;
288 }
289
290 if (op->data.nbytes != 0 && op->data.buswidth > 4)
291 return false;
292
293 return spi_mem_default_supports_op(mem, op);
294}
295
296static const struct aspeed_spi_data ast2400_spi_data;
297
298static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
299{
300 struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
301 struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)];
302 u32 addr_mode, addr_mode_backup;
303 u32 ctl_val;
304 int ret = 0;
305
306 dev_dbg(aspi->dev,
307 "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
308 chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
309 op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
310 op->dummy.buswidth, op->data.buswidth,
311 op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
312
313 addr_mode = readl(aspi->regs + CE_CTRL_REG);
314 addr_mode_backup = addr_mode;
315
316 ctl_val = chip->ctl_val[ASPEED_SPI_BASE];
317 ctl_val &= ~CTRL_IO_CMD_MASK;
318
319 ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT;
320
321 /* 4BYTE address mode */
322 if (op->addr.nbytes) {
323 if (op->addr.nbytes == 4)
324 addr_mode |= (0x11 << chip->cs);
325 else
326 addr_mode &= ~(0x11 << chip->cs);
327
328 if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
329 ctl_val |= CTRL_IO_ADDRESS_4B;
330 }
331
332 if (op->dummy.nbytes)
333 ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
334
335 if (op->data.nbytes)
336 ctl_val |= aspeed_spi_get_io_mode(op);
337
338 if (op->data.dir == SPI_MEM_DATA_OUT)
339 ctl_val |= CTRL_IO_MODE_WRITE;
340 else
341 ctl_val |= CTRL_IO_MODE_READ;
342
343 if (addr_mode != addr_mode_backup)
344 writel(addr_mode, aspi->regs + CE_CTRL_REG);
345 writel(ctl_val, chip->ctl);
346
347 if (op->data.dir == SPI_MEM_DATA_IN) {
348 if (!op->addr.nbytes)
349 ret = aspeed_spi_read_reg(chip, op);
350 else
351 ret = aspeed_spi_read_user(chip, op, op->addr.val,
352 op->data.nbytes, op->data.buf.in);
353 } else {
354 if (!op->addr.nbytes)
355 ret = aspeed_spi_write_reg(chip, op);
356 else
357 ret = aspeed_spi_write_user(chip, op);
358 }
359
360 /* Restore defaults */
361 if (addr_mode != addr_mode_backup)
362 writel(addr_mode_backup, aspi->regs + CE_CTRL_REG);
363 writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
364 return ret;
365}
366
367static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
368{
369 int ret;
370
371 ret = do_aspeed_spi_exec_op(mem, op);
372 if (ret)
373 dev_err(&mem->spi->dev, "operation failed: %d\n", ret);
374 return ret;
375}
376
377static const char *aspeed_spi_get_name(struct spi_mem *mem)
378{
379 struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
380 struct device *dev = aspi->dev;
381
382 return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
383 spi_get_chipselect(mem->spi, 0));
384}
385
386struct aspeed_spi_window {
387 u32 cs;
388 u32 offset;
389 u32 size;
390};
391
392static void aspeed_spi_get_windows(struct aspeed_spi *aspi,
393 struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS])
394{
395 const struct aspeed_spi_data *data = aspi->data;
396 u32 reg_val;
397 u32 cs;
398
399 for (cs = 0; cs < aspi->data->max_cs; cs++) {
400 reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4);
401 windows[cs].cs = cs;
402 windows[cs].size = data->segment_end(aspi, reg_val) -
403 data->segment_start(aspi, reg_val);
404 windows[cs].offset = data->segment_start(aspi, reg_val) - aspi->ahb_base_phy;
405 dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs,
406 windows[cs].offset, windows[cs].size);
407 }
408}
409
410/*
411 * On the AST2600, some CE windows are closed by default at reset but
412 * U-Boot should open all.
413 */
414static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip)
415{
416 struct aspeed_spi *aspi = chip->aspi;
417 struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
418 struct aspeed_spi_window *win = &windows[chip->cs];
419
420 /* No segment registers for the AST2400 SPI controller */
421 if (aspi->data == &ast2400_spi_data) {
422 win->offset = 0;
423 win->size = aspi->ahb_window_size;
424 } else {
425 aspeed_spi_get_windows(aspi, windows);
426 }
427
428 chip->ahb_base = aspi->ahb_base + win->offset;
429 chip->ahb_window_size = win->size;
430
431 dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB",
432 chip->cs, aspi->ahb_base_phy + win->offset,
433 aspi->ahb_base_phy + win->offset + win->size - 1,
434 win->size >> 20);
435
436 return chip->ahb_window_size ? 0 : -1;
437}
438
439static int aspeed_spi_set_window(struct aspeed_spi *aspi,
440 const struct aspeed_spi_window *win)
441{
442 u32 start = aspi->ahb_base_phy + win->offset;
443 u32 end = start + win->size;
444 void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4;
445 u32 seg_val_backup = readl(seg_reg);
446 u32 seg_val = aspi->data->segment_reg(aspi, start, end);
447
448 if (seg_val == seg_val_backup)
449 return 0;
450
451 writel(seg_val, seg_reg);
452
453 /*
454 * Restore initial value if something goes wrong else we could
455 * loose access to the chip.
456 */
457 if (seg_val != readl(seg_reg)) {
458 dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB",
459 win->cs, start, end - 1, win->size >> 20);
460 writel(seg_val_backup, seg_reg);
461 return -EIO;
462 }
463
464 if (win->size)
465 dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB",
466 win->cs, start, end - 1, win->size >> 20);
467 else
468 dev_dbg(aspi->dev, "CE%d window closed", win->cs);
469
470 return 0;
471}
472
473/*
474 * Yet to be done when possible :
475 * - Align mappings on flash size (we don't have the info)
476 * - ioremap each window, not strictly necessary since the overall window
477 * is correct.
478 */
479static const struct aspeed_spi_data ast2500_spi_data;
480static const struct aspeed_spi_data ast2600_spi_data;
481static const struct aspeed_spi_data ast2600_fmc_data;
482
483static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip,
484 u32 local_offset, u32 size)
485{
486 struct aspeed_spi *aspi = chip->aspi;
487 struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
488 struct aspeed_spi_window *win = &windows[chip->cs];
489 int ret;
490
491 /* No segment registers for the AST2400 SPI controller */
492 if (aspi->data == &ast2400_spi_data)
493 return 0;
494
495 /*
496 * Due to an HW issue on the AST2500 SPI controller, the CE0
497 * window size should be smaller than the maximum 128MB.
498 */
499 if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) {
500 size = 120 << 20;
501 dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)",
502 chip->cs, size >> 20);
503 }
504
505 /*
506 * The decoding size of AST2600 SPI controller should set at
507 * least 2MB.
508 */
509 if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) &&
510 size < SZ_2M) {
511 size = SZ_2M;
512 dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)",
513 chip->cs, size >> 20);
514 }
515
516 aspeed_spi_get_windows(aspi, windows);
517
518 /* Adjust this chip window */
519 win->offset += local_offset;
520 win->size = size;
521
522 if (win->offset + win->size > aspi->ahb_window_size) {
523 win->size = aspi->ahb_window_size - win->offset;
524 dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20);
525 }
526
527 ret = aspeed_spi_set_window(aspi, win);
528 if (ret)
529 return ret;
530
531 /* Update chip mapping info */
532 chip->ahb_base = aspi->ahb_base + win->offset;
533 chip->ahb_window_size = win->size;
534
535 /*
536 * Also adjust next chip window to make sure that it does not
537 * overlap with the current window.
538 */
539 if (chip->cs < aspi->data->max_cs - 1) {
540 struct aspeed_spi_window *next = &windows[chip->cs + 1];
541
542 /* Change offset and size to keep the same end address */
543 if ((next->offset + next->size) > (win->offset + win->size))
544 next->size = (next->offset + next->size) - (win->offset + win->size);
545 else
546 next->size = 0;
547 next->offset = win->offset + win->size;
548
549 aspeed_spi_set_window(aspi, next);
550 }
551 return 0;
552}
553
554static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip);
555
556static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
557{
558 struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
559 struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
560 struct spi_mem_op *op = &desc->info.op_tmpl;
561 u32 ctl_val;
562 int ret = 0;
563
564 dev_dbg(aspi->dev,
565 "CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n",
566 chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
567 desc->info.offset, desc->info.offset + desc->info.length,
568 op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
569 op->dummy.buswidth, op->data.buswidth,
570 op->addr.nbytes, op->dummy.nbytes);
571
572 chip->clk_freq = desc->mem->spi->max_speed_hz;
573
574 /* Only for reads */
575 if (op->data.dir != SPI_MEM_DATA_IN)
576 return -EOPNOTSUPP;
577
578 aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length);
579
580 if (desc->info.length > chip->ahb_window_size)
581 dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping",
582 chip->cs, chip->ahb_window_size >> 20);
583
584 /* Define the default IO read settings */
585 ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
586 ctl_val |= aspeed_spi_get_io_mode(op) |
587 op->cmd.opcode << CTRL_COMMAND_SHIFT |
588 CTRL_IO_MODE_READ;
589
590 if (op->dummy.nbytes)
591 ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
592
593 /* Tune 4BYTE address mode */
594 if (op->addr.nbytes) {
595 u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);
596
597 if (op->addr.nbytes == 4)
598 addr_mode |= (0x11 << chip->cs);
599 else
600 addr_mode &= ~(0x11 << chip->cs);
601 writel(addr_mode, aspi->regs + CE_CTRL_REG);
602
603 /* AST2400 SPI controller sets 4BYTE address mode in
604 * CE0 Control Register
605 */
606 if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
607 ctl_val |= CTRL_IO_ADDRESS_4B;
608 }
609
610 /* READ mode is the controller default setting */
611 chip->ctl_val[ASPEED_SPI_READ] = ctl_val;
612 writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
613
614 ret = aspeed_spi_do_calibration(chip);
615
616 dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n",
617 chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]);
618
619 return ret;
620}
621
622static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
623 u64 offset, size_t len, void *buf)
624{
625 struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
626 struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
627
628 /* Switch to USER command mode if mapping window is too small */
629 if (chip->ahb_window_size < offset + len) {
630 int ret;
631
632 ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf);
633 if (ret < 0)
634 return ret;
635 } else {
636 memcpy_fromio(buf, chip->ahb_base + offset, len);
637 }
638
639 return len;
640}
641
642static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
643 .supports_op = aspeed_spi_supports_op,
644 .exec_op = aspeed_spi_exec_op,
645 .get_name = aspeed_spi_get_name,
646 .dirmap_create = aspeed_spi_dirmap_create,
647 .dirmap_read = aspeed_spi_dirmap_read,
648};
649
650static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type)
651{
652 u32 reg;
653
654 reg = readl(aspi->regs + CONFIG_REG);
655 reg &= ~(0x3 << (cs * 2));
656 reg |= type << (cs * 2);
657 writel(reg, aspi->regs + CONFIG_REG);
658}
659
660static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, bool enable)
661{
662 u32 we_bit = BIT(aspi->data->we0 + cs);
663 u32 reg = readl(aspi->regs + CONFIG_REG);
664
665 if (enable)
666 reg |= we_bit;
667 else
668 reg &= ~we_bit;
669 writel(reg, aspi->regs + CONFIG_REG);
670}
671
672static int aspeed_spi_setup(struct spi_device *spi)
673{
674 struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
675 const struct aspeed_spi_data *data = aspi->data;
676 unsigned int cs = spi_get_chipselect(spi, 0);
677 struct aspeed_spi_chip *chip = &aspi->chips[cs];
678
679 chip->aspi = aspi;
680 chip->cs = cs;
681 chip->ctl = aspi->regs + data->ctl0 + cs * 4;
682
683 /* The driver only supports SPI type flash */
684 if (data->hastype)
685 aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI);
686
687 if (aspeed_spi_chip_set_default_window(chip) < 0) {
688 dev_warn(aspi->dev, "CE%d window invalid", cs);
689 return -EINVAL;
690 }
691
692 aspeed_spi_chip_enable(aspi, cs, true);
693
694 chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER;
695
696 dev_dbg(aspi->dev, "CE%d setup done\n", cs);
697 return 0;
698}
699
700static void aspeed_spi_cleanup(struct spi_device *spi)
701{
702 struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
703 unsigned int cs = spi_get_chipselect(spi, 0);
704
705 aspeed_spi_chip_enable(aspi, cs, false);
706
707 dev_dbg(aspi->dev, "CE%d cleanup done\n", cs);
708}
709
710static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable)
711{
712 int cs;
713
714 for (cs = 0; cs < aspi->data->max_cs; cs++)
715 aspeed_spi_chip_enable(aspi, cs, enable);
716}
717
718static int aspeed_spi_probe(struct platform_device *pdev)
719{
720 struct device *dev = &pdev->dev;
721 const struct aspeed_spi_data *data;
722 struct spi_controller *ctlr;
723 struct aspeed_spi *aspi;
724 struct resource *res;
725 int ret;
726
727 data = of_device_get_match_data(&pdev->dev);
728 if (!data)
729 return -ENODEV;
730
731 ctlr = devm_spi_alloc_host(dev, sizeof(*aspi));
732 if (!ctlr)
733 return -ENOMEM;
734
735 aspi = spi_controller_get_devdata(ctlr);
736 platform_set_drvdata(pdev, aspi);
737 aspi->data = data;
738 aspi->dev = dev;
739
740 aspi->regs = devm_platform_ioremap_resource(pdev, 0);
741 if (IS_ERR(aspi->regs))
742 return PTR_ERR(aspi->regs);
743
744 aspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
745 if (IS_ERR(aspi->ahb_base)) {
746 dev_err(dev, "missing AHB mapping window\n");
747 return PTR_ERR(aspi->ahb_base);
748 }
749
750 aspi->ahb_window_size = resource_size(res);
751 aspi->ahb_base_phy = res->start;
752
753 aspi->clk = devm_clk_get_enabled(&pdev->dev, NULL);
754 if (IS_ERR(aspi->clk)) {
755 dev_err(dev, "missing clock\n");
756 return PTR_ERR(aspi->clk);
757 }
758
759 aspi->clk_freq = clk_get_rate(aspi->clk);
760 if (!aspi->clk_freq) {
761 dev_err(dev, "invalid clock\n");
762 return -EINVAL;
763 }
764
765 /* IRQ is for DMA, which the driver doesn't support yet */
766
767 ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits;
768 ctlr->bus_num = pdev->id;
769 ctlr->mem_ops = &aspeed_spi_mem_ops;
770 ctlr->setup = aspeed_spi_setup;
771 ctlr->cleanup = aspeed_spi_cleanup;
772 ctlr->num_chipselect = data->max_cs;
773 ctlr->dev.of_node = dev->of_node;
774
775 ret = devm_spi_register_controller(dev, ctlr);
776 if (ret)
777 dev_err(&pdev->dev, "spi_register_controller failed\n");
778
779 return ret;
780}
781
782static void aspeed_spi_remove(struct platform_device *pdev)
783{
784 struct aspeed_spi *aspi = platform_get_drvdata(pdev);
785
786 aspeed_spi_enable(aspi, false);
787}
788
789/*
790 * AHB mappings
791 */
792
793/*
794 * The Segment Registers of the AST2400 and AST2500 use an 8MB unit.
795 * The address range is encoded with absolute addresses in the overall
796 * mapping window.
797 */
798static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg)
799{
800 return ((reg >> 16) & 0xFF) << 23;
801}
802
803static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg)
804{
805 return ((reg >> 24) & 0xFF) << 23;
806}
807
808static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end)
809{
810 return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24);
811}
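/*
 * Worked example (illustrative register value, not a chip default): a
 * segment register of 0x24200000 decodes as
 *   start = ((0x24200000 >> 16) & 0xFF) << 23 = 0x20 << 23 = 0x10000000
 *   end   = ((0x24200000 >> 24) & 0xFF) << 23 = 0x24 << 23 = 0x12000000
 * i.e. a 32MB window at absolute AHB address 0x10000000. Encoding the
 * same start/end with aspeed_spi_segment_reg() returns 0x24200000.
 */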
812
813/*
814 * The Segment Registers of the AST2600 use a 1MB unit. The address
815 * range is encoded with offsets in the overall mapping window.
816 */
817
818#define AST2600_SEG_ADDR_MASK 0x0ff00000
819
820static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi,
821 u32 reg)
822{
823 u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;
824
825 return aspi->ahb_base_phy + start_offset;
826}
827
828static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi,
829 u32 reg)
830{
831 u32 end_offset = reg & AST2600_SEG_ADDR_MASK;
832
833 /* segment is disabled */
834 if (!end_offset)
835 return aspi->ahb_base_phy;
836
837 return aspi->ahb_base_phy + end_offset + 0x100000;
838}
839
840static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi,
841 u32 start, u32 end)
842{
843 /* disable zero size segments */
844 if (start == end)
845 return 0;
846
847 return ((start & AST2600_SEG_ADDR_MASK) >> 16) |
848 ((end - 1) & AST2600_SEG_ADDR_MASK);
849}
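/*
 * Worked example (illustrative values, assuming ahb_base_phy =
 * 0x20000000): encoding a 64MB segment [0x20000000 - 0x24000000) gives
 *   reg = ((0x20000000 & 0x0ff00000) >> 16) | (0x23ffffff & 0x0ff00000)
 *       = 0x0 | 0x03f00000 = 0x03f00000
 * and decoding it back yields start = 0x20000000 and
 * end = 0x20000000 + 0x03f00000 + 0x100000 = 0x24000000. A zero end
 * field marks the segment as disabled.
 */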
850
851/*
852 * Read timing compensation sequences
853 */
854
855#define CALIBRATE_BUF_SIZE SZ_16K
856
857static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip,
858 const u8 *golden_buf, u8 *test_buf)
859{
860 int i;
861
862 for (i = 0; i < 10; i++) {
863 memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
864 if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) {
865#if defined(VERBOSE_DEBUG)
866 print_hex_dump_bytes(DEVICE_NAME " fail: ", DUMP_PREFIX_NONE,
867 test_buf, 0x100);
868#endif
869 return false;
870 }
871 }
872 return true;
873}
874
875#define FREAD_TPASS(i) (((i) / 2) | (((i) & 1) ? 0 : 8))
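/*
 * Illustrative decoding of FREAD_TPASS(), derived from the debug output
 * in aspeed_spi_calibrate() below: loop index i maps to a 4-bit value
 * whose bits [2:0] hold i / 2 HCLK cycles of delay, with bit 3 set for
 * even i (the 4ns DI delay case). For example, FREAD_TPASS(5) = 0x2
 * (2 HCLK cycles, no DI delay) and FREAD_TPASS(6) = 0xb (3 HCLK cycles
 * plus DI delay). Each HCLK divider owns one nibble of the timing
 * register, selected by shift = (hdiv - 1) * 4.
 */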
876
877/*
878 * The timing register is shared by all devices. Only update for CE0.
879 */
880static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
881 const u8 *golden_buf, u8 *test_buf)
882{
883 struct aspeed_spi *aspi = chip->aspi;
884 const struct aspeed_spi_data *data = aspi->data;
885 int i;
886 int good_pass = -1, pass_count = 0;
887 u32 shift = (hdiv - 1) << 2;
888 u32 mask = ~(0xfu << shift);
889 u32 fread_timing_val = 0;
890
891 /* Try HCLK delay 0..5, each one with/without delay and look for a
892 * good pair.
893 */
894 for (i = 0; i < 12; i++) {
895 bool pass;
896
897 if (chip->cs == 0) {
898 fread_timing_val &= mask;
899 fread_timing_val |= FREAD_TPASS(i) << shift;
900 writel(fread_timing_val, aspi->regs + data->timing);
901 }
902 pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
903 dev_dbg(aspi->dev,
904 " * [%08x] %d HCLK delay, %dns DI delay : %s",
905 fread_timing_val, i / 2, (i & 1) ? 0 : 4,
906 pass ? "PASS" : "FAIL");
907 if (pass) {
908 pass_count++;
909 if (pass_count == 3) {
910 good_pass = i - 1;
911 break;
912 }
913 } else {
914 pass_count = 0;
915 }
916 }
917
918 /* No good setting for this frequency */
919 if (good_pass < 0)
920 return -1;
921
922 /* We have at least one pass of margin on each side, use the middle one */
923 if (chip->cs == 0) {
924 fread_timing_val &= mask;
925 fread_timing_val |= FREAD_TPASS(good_pass) << shift;
926 writel(fread_timing_val, aspi->regs + data->timing);
927 }
928 dev_dbg(aspi->dev, " * -> good is pass %d [0x%08x]",
929 good_pass, fread_timing_val);
930 return 0;
931}
932
933static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size)
934{
935 const u32 *tb32 = (const u32 *)test_buf;
936 u32 i, cnt = 0;
937
938 /* Check that there are enough words that are neither all 0s
939 * nor all 1s so the calibration can be considered valid.
940 *
941 * Use an arbitrary threshold of 64 for now.
942 */
943 size >>= 2;
944 for (i = 0; i < size; i++) {
945 if (tb32[i] != 0 && tb32[i] != 0xffffffff)
946 cnt++;
947 }
948 return cnt >= 64;
949}
950
951static const u32 aspeed_spi_hclk_divs[] = {
952 0xf, /* HCLK */
953 0x7, /* HCLK/2 */
954 0xe, /* HCLK/3 */
955 0x6, /* HCLK/4 */
956 0xd, /* HCLK/5 */
957};
958
959#define ASPEED_SPI_HCLK_DIV(i) \
960 (aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT)
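/*
 * Illustrative example: ASPEED_SPI_HCLK_DIV(2) =
 * aspeed_spi_hclk_divs[1] << CTRL_FREQ_SEL_SHIFT = 0x7 << 8 = 0x700,
 * which selects HCLK/2 in the clock-select field of the CE control
 * register.
 */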
961
962static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
963{
964 struct aspeed_spi *aspi = chip->aspi;
965 const struct aspeed_spi_data *data = aspi->data;
966 u32 ahb_freq = aspi->clk_freq;
967 u32 max_freq = chip->clk_freq;
968 u32 ctl_val;
969 u8 *golden_buf = NULL;
970 u8 *test_buf = NULL;
971 int i, rc, best_div = -1;
972
973 dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz",
974 ahb_freq / 1000000);
975
976 /*
977 * Read the calibration area at the lowest frequency to check that it
978 * is usable and to capture the golden data.
979 */
980 ctl_val = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask;
981 writel(ctl_val, chip->ctl);
982
983 test_buf = kzalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL);
984 if (!test_buf)
985 return -ENOMEM;
986
987 golden_buf = test_buf + CALIBRATE_BUF_SIZE;
988
989 memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
990 if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) {
991 dev_info(aspi->dev, "Calibration area too uniform, using low speed\n");
992 goto no_calib;
993 }
994
995#if defined(VERBOSE_DEBUG)
996 print_hex_dump_bytes(DEVICE_NAME " good: ", DUMP_PREFIX_NONE,
997 golden_buf, 0x100);
998#endif
999
1000 /* Now we iterate the HCLK dividers until we find our breaking point */
1001 for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) {
1002 u32 tv, freq;
1003
1004 freq = ahb_freq / i;
1005 if (freq > max_freq)
1006 continue;
1007
1008 /* Set the timing */
1009 tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i);
1010 writel(tv, chip->ctl);
1011 dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv);
1012 rc = data->calibrate(chip, i, golden_buf, test_buf);
1013 if (rc == 0)
1014 best_div = i;
1015 }
1016
1017 /* Nothing found? */
1018 if (best_div < 0) {
1019 dev_warn(aspi->dev, "No good frequency found, using lowest speed\n");
1020 } else {
1021 dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div);
1022
1023 /* Record the freq */
1024 for (i = 0; i < ASPEED_SPI_MAX; i++)
1025 chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) |
1026 ASPEED_SPI_HCLK_DIV(best_div);
1027 }
1028
1029no_calib:
1030 writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
1031 kfree(test_buf);
1032 return 0;
1033}
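/*
 * Illustrative example for aspeed_spi_do_calibration() above, using the
 * AST2400 mask below (hclk_mask = 0xfffff0ff): if calibration settles on
 * best_div = 2, each control value has its clock-select bits [11:8]
 * cleared by the mask and ASPEED_SPI_HCLK_DIV(2) = 0x700 ORed in, so the
 * base, read and write command modes all run at HCLK/2.
 */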
1034
1035#define TIMING_DELAY_DI BIT(3)
1036#define TIMING_DELAY_HCYCLE_MAX 5
1037#define TIMING_REG_AST2600(chip) \
1038 ((chip)->aspi->regs + (chip)->aspi->data->timing + \
1039 (chip)->cs * 4)
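/*
 * Illustrative layout, derived from aspeed_spi_ast2600_calibrate()
 * below: on the AST2600 each chip select has its own timing register at
 * data->timing + cs * 4, and each HCLK divider owns one byte of it,
 * selected by shift = (hdiv - 2) * 8. Within that byte, bits [2:0] hold
 * the HCLK cycle delay, bit 3 (TIMING_DELAY_DI) enables the DI input
 * delay and bits [7:4] hold the delay value swept by the loop.
 */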
1040
1041static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
1042 const u8 *golden_buf, u8 *test_buf)
1043{
1044 struct aspeed_spi *aspi = chip->aspi;
1045 int hcycle;
1046 u32 shift = (hdiv - 2) << 3;
1047 u32 mask = ~(0xfu << shift);
1048 u32 fread_timing_val = 0;
1049
1050 for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
1051 int delay_ns;
1052 bool pass = false;
1053
1054 fread_timing_val &= mask;
1055 fread_timing_val |= hcycle << shift;
1056
1057 /* no DI input delay first */
1058 writel(fread_timing_val, TIMING_REG_AST2600(chip));
1059 pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
1060 dev_dbg(aspi->dev,
1061 " * [%08x] %d HCLK delay, DI delay none : %s",
1062 fread_timing_val, hcycle, pass ? "PASS" : "FAIL");
1063 if (pass)
1064 return 0;
1065
1066 /* Add DI input delays */
1067 fread_timing_val &= mask;
1068 fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift;
1069
1070 for (delay_ns = 0; delay_ns < 0x10; delay_ns++) {
1071 fread_timing_val &= ~(0xf << (4 + shift));
1072 fread_timing_val |= delay_ns << (4 + shift);
1073
1074 writel(fread_timing_val, TIMING_REG_AST2600(chip));
1075 pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
1076 dev_dbg(aspi->dev,
1077 " * [%08x] %d HCLK delay, DI delay %d.%dns : %s",
1078 fread_timing_val, hcycle, (delay_ns + 1) / 2,
1079 (delay_ns + 1) & 1 ? 5 : 0, pass ? "PASS" : "FAIL");
1080 /*
1081 * TODO: This is optimistic. We should look
1082 * for a working interval and save the middle
1083 * value in the read timing register.
1084 */
1085 if (pass)
1086 return 0;
1087 }
1088 }
1089
1090 /* No good setting for this frequency */
1091 return -1;
1092}
1093
1094/*
1095 * Platform definitions
1096 */
1097static const struct aspeed_spi_data ast2400_fmc_data = {
1098 .max_cs = 5,
1099 .hastype = true,
1100 .we0 = 16,
1101 .ctl0 = CE0_CTRL_REG,
1102 .timing = CE0_TIMING_COMPENSATION_REG,
1103 .hclk_mask = 0xfffff0ff,
1104 .hdiv_max = 1,
1105 .calibrate = aspeed_spi_calibrate,
1106 .segment_start = aspeed_spi_segment_start,
1107 .segment_end = aspeed_spi_segment_end,
1108 .segment_reg = aspeed_spi_segment_reg,
1109};
1110
1111static const struct aspeed_spi_data ast2400_spi_data = {
1112 .max_cs = 1,
1113 .hastype = false,
1114 .we0 = 0,
1115 .ctl0 = 0x04,
1116 .timing = 0x14,
1117 .hclk_mask = 0xfffff0ff,
1118 .hdiv_max = 1,
1119 .calibrate = aspeed_spi_calibrate,
1120 /* No segment registers */
1121};
1122
1123static const struct aspeed_spi_data ast2500_fmc_data = {
1124 .max_cs = 3,
1125 .hastype = true,
1126 .we0 = 16,
1127 .ctl0 = CE0_CTRL_REG,
1128 .timing = CE0_TIMING_COMPENSATION_REG,
1129 .hclk_mask = 0xffffd0ff,
1130 .hdiv_max = 1,
1131 .calibrate = aspeed_spi_calibrate,
1132 .segment_start = aspeed_spi_segment_start,
1133 .segment_end = aspeed_spi_segment_end,
1134 .segment_reg = aspeed_spi_segment_reg,
1135};
1136
1137static const struct aspeed_spi_data ast2500_spi_data = {
1138 .max_cs = 2,
1139 .hastype = false,
1140 .we0 = 16,
1141 .ctl0 = CE0_CTRL_REG,
1142 .timing = CE0_TIMING_COMPENSATION_REG,
1143 .hclk_mask = 0xffffd0ff,
1144 .hdiv_max = 1,
1145 .calibrate = aspeed_spi_calibrate,
1146 .segment_start = aspeed_spi_segment_start,
1147 .segment_end = aspeed_spi_segment_end,
1148 .segment_reg = aspeed_spi_segment_reg,
1149};
1150
1151static const struct aspeed_spi_data ast2600_fmc_data = {
1152 .max_cs = 3,
1153 .hastype = false,
1154 .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
1155 .we0 = 16,
1156 .ctl0 = CE0_CTRL_REG,
1157 .timing = CE0_TIMING_COMPENSATION_REG,
1158 .hclk_mask = 0xf0fff0ff,
1159 .hdiv_max = 2,
1160 .calibrate = aspeed_spi_ast2600_calibrate,
1161 .segment_start = aspeed_spi_segment_ast2600_start,
1162 .segment_end = aspeed_spi_segment_ast2600_end,
1163 .segment_reg = aspeed_spi_segment_ast2600_reg,
1164};
1165
1166static const struct aspeed_spi_data ast2600_spi_data = {
1167 .max_cs = 2,
1168 .hastype = false,
1169 .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
1170 .we0 = 16,
1171 .ctl0 = CE0_CTRL_REG,
1172 .timing = CE0_TIMING_COMPENSATION_REG,
1173 .hclk_mask = 0xf0fff0ff,
1174 .hdiv_max = 2,
1175 .calibrate = aspeed_spi_ast2600_calibrate,
1176 .segment_start = aspeed_spi_segment_ast2600_start,
1177 .segment_end = aspeed_spi_segment_ast2600_end,
1178 .segment_reg = aspeed_spi_segment_ast2600_reg,
1179};
1180
1181static const struct of_device_id aspeed_spi_matches[] = {
1182 { .compatible = "aspeed,ast2400-fmc", .data = &ast2400_fmc_data },
1183 { .compatible = "aspeed,ast2400-spi", .data = &ast2400_spi_data },
1184 { .compatible = "aspeed,ast2500-fmc", .data = &ast2500_fmc_data },
1185 { .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data },
1186 { .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data },
1187 { .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data },
1188 { }
1189};
1190MODULE_DEVICE_TABLE(of, aspeed_spi_matches);
1191
1192static struct platform_driver aspeed_spi_driver = {
1193 .probe = aspeed_spi_probe,
1194 .remove = aspeed_spi_remove,
1195 .driver = {
1196 .name = DEVICE_NAME,
1197 .of_match_table = aspeed_spi_matches,
1198 }
1199};
1200
1201module_platform_driver(aspeed_spi_driver);
1202
1203MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
1204MODULE_AUTHOR("Chin-Ting Kuo <chin-ting_kuo@aspeedtech.com>");
1205MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>");
1206MODULE_LICENSE("GPL v2");