// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2018 Macronix International Co., Ltd.
//
// Authors:
//	Mason Yang <masonccyang@mxic.com.tw>
//	zhengxunli <zhengxunli@mxic.com.tw>
//	Boris Brezillon <boris.brezillon@bootlin.com>
//

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

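/*
 * Register offsets and bit field definitions of the MX25F0A host
 * controller, as used throughout this driver.
 */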
#define HC_CFG			0x0
#define HC_CFG_IF_CFG(x)	((x) << 27)
#define HC_CFG_DUAL_SLAVE	BIT(31)
#define HC_CFG_INDIVIDUAL	BIT(30)
#define HC_CFG_NIO(x)		(((x) / 4) << 27)
#define HC_CFG_TYPE(s, t)	((t) << (23 + ((s) * 2)))
#define HC_CFG_TYPE_SPI_NOR	0
#define HC_CFG_TYPE_SPI_NAND	1
#define HC_CFG_TYPE_SPI_RAM	2
#define HC_CFG_TYPE_RAW_NAND	3
#define HC_CFG_SLV_ACT(x)	((x) << 21)
#define HC_CFG_CLK_PH_EN	BIT(20)
#define HC_CFG_CLK_POL_INV	BIT(19)
#define HC_CFG_BIG_ENDIAN	BIT(18)
#define HC_CFG_DATA_PASS	BIT(17)
#define HC_CFG_IDLE_SIO_LVL(x)	((x) << 16)
#define HC_CFG_MAN_START_EN	BIT(3)
#define HC_CFG_MAN_START	BIT(2)
#define HC_CFG_MAN_CS_EN	BIT(1)
#define HC_CFG_MAN_CS_ASSERT	BIT(0)

#define INT_STS			0x4
#define INT_STS_EN		0x8
#define INT_SIG_EN		0xc
#define INT_STS_ALL		GENMASK(31, 0)
#define INT_RDY_PIN		BIT(26)
#define INT_RDY_SR		BIT(25)
#define INT_LNR_SUSP		BIT(24)
#define INT_ECC_ERR		BIT(17)
#define INT_CRC_ERR		BIT(16)
#define INT_LWR_DIS		BIT(12)
#define INT_LRD_DIS		BIT(11)
#define INT_SDMA_INT		BIT(10)
#define INT_DMA_FINISH		BIT(9)
#define INT_RX_NOT_FULL		BIT(3)
#define INT_RX_NOT_EMPTY	BIT(2)
#define INT_TX_NOT_FULL		BIT(1)
#define INT_TX_EMPTY		BIT(0)

#define HC_EN			0x10
#define HC_EN_BIT		BIT(0)

#define TXD(x)			(0x14 + ((x) * 4))
#define RXD			0x24

#define SS_CTRL(s)		(0x30 + ((s) * 4))
#define LRD_CFG			0x44
#define LWR_CFG			0x80
#define RWW_CFG			0x70
#define OP_READ			BIT(23)
#define OP_DUMMY_CYC(x)		((x) << 17)
#define OP_ADDR_BYTES(x)	((x) << 14)
#define OP_CMD_BYTES(x)		(((x) - 1) << 13)
#define OP_OCTA_CRC_EN		BIT(12)
#define OP_DQS_EN		BIT(11)
#define OP_ENHC_EN		BIT(10)
#define OP_PREAMBLE_EN		BIT(9)
#define OP_DATA_DDR		BIT(8)
#define OP_DATA_BUSW(x)		((x) << 6)
#define OP_ADDR_DDR		BIT(5)
#define OP_ADDR_BUSW(x)		((x) << 3)
#define OP_CMD_DDR		BIT(2)
#define OP_CMD_BUSW(x)		(x)
#define OP_BUSW_1		0
#define OP_BUSW_2		1
#define OP_BUSW_4		2
#define OP_BUSW_8		3

#define OCTA_CRC		0x38
#define OCTA_CRC_IN_EN(s)	BIT(3 + ((s) * 16))
#define OCTA_CRC_CHUNK(s, x)	((fls((x) / 32)) << (1 + ((s) * 16)))
#define OCTA_CRC_OUT_EN(s)	BIT(0 + ((s) * 16))

#define ONFI_DIN_CNT(s)		(0x3c + (s))

#define LRD_CTRL		0x48
#define RWW_CTRL		0x74
#define LWR_CTRL		0x84
#define LMODE_EN		BIT(31)
#define LMODE_SLV_ACT(x)	((x) << 21)
#define LMODE_CMD1(x)		((x) << 8)
#define LMODE_CMD0(x)		(x)

#define LRD_ADDR		0x4c
#define LWR_ADDR		0x88
#define LRD_RANGE		0x50
#define LWR_RANGE		0x8c

#define AXI_SLV_ADDR		0x54

#define DMAC_RD_CFG		0x58
#define DMAC_WR_CFG		0x94
#define DMAC_CFG_PERIPH_EN	BIT(31)
#define DMAC_CFG_ALLFLUSH_EN	BIT(30)
#define DMAC_CFG_LASTFLUSH_EN	BIT(29)
#define DMAC_CFG_QE(x)		(((x) + 1) << 16)
#define DMAC_CFG_BURST_LEN(x)	(((x) + 1) << 12)
#define DMAC_CFG_BURST_SZ(x)	((x) << 8)
#define DMAC_CFG_DIR_READ	BIT(1)
#define DMAC_CFG_START		BIT(0)

#define DMAC_RD_CNT		0x5c
#define DMAC_WR_CNT		0x98

#define SDMA_ADDR		0x60

#define DMAM_CFG		0x64
#define DMAM_CFG_START		BIT(31)
#define DMAM_CFG_CONT		BIT(30)
#define DMAM_CFG_SDMA_GAP(x)	(fls((x) / 8192) << 2)
#define DMAM_CFG_DIR_READ	BIT(1)
#define DMAM_CFG_EN		BIT(0)

#define DMAM_CNT		0x68

#define LNR_TIMER_TH		0x6c

#define RDM_CFG0		0x78
#define RDM_CFG0_POLY(x)	(x)

#define RDM_CFG1		0x7c
#define RDM_CFG1_RDM_EN		BIT(31)
#define RDM_CFG1_SEED(x)	(x)

#define LWR_SUSP_CTRL		0x90
#define LWR_SUSP_CTRL_EN	BIT(31)

#define DMAS_CTRL		0x9c
#define DMAS_CTRL_EN		BIT(31)
#define DMAS_CTRL_DIR_READ	BIT(30)

#define DATA_STROB		0xa0
#define DATA_STROB_EDO_EN	BIT(2)
#define DATA_STROB_INV_POL	BIT(1)
#define DATA_STROB_DELAY_2CYC	BIT(0)

#define IDLY_CODE(x)		(0xa4 + ((x) * 4))
#define IDLY_CODE_VAL(x, v)	((v) << (((x) % 4) * 8))

#define GPIO			0xc4
#define GPIO_PT(x)		BIT(3 + ((x) * 16))
#define GPIO_RESET(x)		BIT(2 + ((x) * 16))
#define GPIO_HOLDB(x)		BIT(1 + ((x) * 16))
#define GPIO_WPB(x)		BIT((x) * 16)

#define HC_VER			0xd0

#define HW_TEST(x)		(0xe0 + ((x) * 4))

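/*
 * Driver private data: controller clocks, MMIO register base, the current
 * SPI clock rate, the optional linearly-mapped ("dirmap") window and the
 * state of the pipelined ECC engine integration.
 */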
struct mxic_spi {
	struct device *dev;
	struct clk *ps_clk;
	struct clk *send_clk;
	struct clk *send_dly_clk;
	void __iomem *regs;
	u32 cur_speed_hz;
	struct {
		void __iomem *map;
		dma_addr_t dma;
		size_t size;
	} linear;

	struct {
		bool use_pipelined_conf;
		struct nand_ecc_engine *pipelined_engine;
		void *ctx;
	} ecc;
};

static int mxic_spi_clk_enable(struct mxic_spi *mxic)
{
	int ret;

	ret = clk_prepare_enable(mxic->send_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(mxic->send_dly_clk);
	if (ret)
		goto err_send_dly_clk;

	return ret;

err_send_dly_clk:
	clk_disable_unprepare(mxic->send_clk);

	return ret;
}

static void mxic_spi_clk_disable(struct mxic_spi *mxic)
{
	clk_disable_unprepare(mxic->send_clk);
	clk_disable_unprepare(mxic->send_dly_clk);
}

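/*
 * Program the same input delay code into all eight IDLY_CODE slots,
 * four per IDLY_CODE register.
 */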
static void mxic_spi_set_input_delay_dqs(struct mxic_spi *mxic, u8 idly_code)
{
	writel(IDLY_CODE_VAL(0, idly_code) |
	       IDLY_CODE_VAL(1, idly_code) |
	       IDLY_CODE_VAL(2, idly_code) |
	       IDLY_CODE_VAL(3, idly_code),
	       mxic->regs + IDLY_CODE(0));
	writel(IDLY_CODE_VAL(4, idly_code) |
	       IDLY_CODE_VAL(5, idly_code) |
	       IDLY_CODE_VAL(6, idly_code) |
	       IDLY_CODE_VAL(7, idly_code),
	       mxic->regs + IDLY_CODE(1));
}

static int mxic_spi_clk_setup(struct mxic_spi *mxic, unsigned long freq)
{
	int ret;

	ret = clk_set_rate(mxic->send_clk, freq);
	if (ret)
		return ret;

	ret = clk_set_rate(mxic->send_dly_clk, freq);
	if (ret)
		return ret;

	/*
	 * The input delay code ranges from 0x0 to 0x1F in steps of 78 ps,
	 * giving a maximum input delay of 2.418 ns.
	 */
	mxic_spi_set_input_delay_dqs(mxic, 0xf);

	/*
	 * Phase degree = 360 * freq * output-delay
	 * where output-delay is a constant value 1 ns in FPGA.
	 *
	 * Get Phase degree = 360 * freq * 1 ns
	 *                  = 360 * freq * 1 sec / 1000000000
	 *                  = 9 * freq / 25000000
	 */
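	/* For example, freq = 100 MHz gives 9 * 100000000 / 25000000 = 36 degrees. */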
	ret = clk_set_phase(mxic->send_dly_clk, 9 * freq / 25000000);
	if (ret)
		return ret;

	return 0;
}

static int mxic_spi_set_freq(struct mxic_spi *mxic, unsigned long freq)
{
	int ret;

	if (mxic->cur_speed_hz == freq)
		return 0;

	mxic_spi_clk_disable(mxic);
	ret = mxic_spi_clk_setup(mxic, freq);
	if (ret)
		return ret;

	ret = mxic_spi_clk_enable(mxic);
	if (ret)
		return ret;

	mxic->cur_speed_hz = freq;

	return 0;
}

static void mxic_spi_hw_init(struct mxic_spi *mxic)
{
	writel(0, mxic->regs + DATA_STROB);
	writel(INT_STS_ALL, mxic->regs + INT_STS_EN);
	writel(0, mxic->regs + HC_EN);
	writel(0, mxic->regs + LRD_CFG);
	writel(0, mxic->regs + LRD_CTRL);
	writel(HC_CFG_NIO(1) | HC_CFG_TYPE(0, HC_CFG_TYPE_SPI_NOR) |
	       HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN | HC_CFG_IDLE_SIO_LVL(1),
	       mxic->regs + HC_CFG);
}

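/*
 * Build the HC_CFG value for a given SPI device: the I/O width is derived
 * from the device's SPI mode bits, and HC_CFG_DATA_PASS is kept set unless
 * the operation requested 16-bit data swapping (swap16).
 */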
static u32 mxic_spi_prep_hc_cfg(struct spi_device *spi, u32 flags,
				bool swap16)
{
	int nio = 1;

	if (spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL))
		nio = 8;
	else if (spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
		nio = 4;
	else if (spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
		nio = 2;

	if (swap16)
		flags &= ~HC_CFG_DATA_PASS;
	else
		flags |= HC_CFG_DATA_PASS;

	return flags | HC_CFG_NIO(nio) |
	       HC_CFG_TYPE(spi_get_chipselect(spi, 0), HC_CFG_TYPE_SPI_NOR) |
	       HC_CFG_SLV_ACT(spi_get_chipselect(spi, 0)) | HC_CFG_IDLE_SIO_LVL(1);
}

static u32 mxic_spi_mem_prep_op_cfg(const struct spi_mem_op *op,
				    unsigned int data_len)
{
	u32 cfg = OP_CMD_BYTES(op->cmd.nbytes) |
		  OP_CMD_BUSW(fls(op->cmd.buswidth) - 1) |
		  (op->cmd.dtr ? OP_CMD_DDR : 0);

	if (op->addr.nbytes)
		cfg |= OP_ADDR_BYTES(op->addr.nbytes) |
		       OP_ADDR_BUSW(fls(op->addr.buswidth) - 1) |
		       (op->addr.dtr ? OP_ADDR_DDR : 0);

	if (op->dummy.nbytes)
		cfg |= OP_DUMMY_CYC(op->dummy.nbytes);

	/* Direct mapping data.nbytes field is not populated */
	if (data_len) {
		cfg |= OP_DATA_BUSW(fls(op->data.buswidth) - 1) |
		       (op->data.dtr ? OP_DATA_DDR : 0);
		if (op->data.dir == SPI_MEM_DATA_IN) {
			cfg |= OP_READ;
			if (op->data.dtr)
				cfg |= OP_DQS_EN;
		}
	}

	return cfg;
}

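/*
 * PIO transfer helper: pushes up to four bytes at a time through the TXD
 * FIFO, waits for the TX empty and RX not-empty status bits, then reads
 * the received word back from RXD (discarded when no rx buffer is given).
 */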
static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
			      void *rxbuf, unsigned int len)
{
	unsigned int pos = 0;

	while (pos < len) {
		unsigned int nbytes = len - pos;
		u32 data = 0xffffffff;
		u32 sts;
		int ret;

		if (nbytes > 4)
			nbytes = 4;

		if (txbuf)
			memcpy(&data, txbuf + pos, nbytes);

		ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
					 sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
		if (ret)
			return ret;

		writel(data, mxic->regs + TXD(nbytes % 4));

		ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
					 sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
		if (ret)
			return ret;

		ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
					 sts & INT_RX_NOT_EMPTY, 0,
					 USEC_PER_SEC);
		if (ret)
			return ret;

		data = readl(mxic->regs + RXD);
		if (rxbuf) {
			data >>= (8 * (4 - nbytes));
			memcpy(rxbuf + pos, &data, nbytes);
		}
		WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);

		pos += nbytes;
	}

	return 0;
}

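/*
 * Direct-mapping read: the access window is configured through the LRD_*
 * registers, then the data is either pulled through the pipelined ECC
 * engine or copied straight out of the linearly-mapped window.
 */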
static ssize_t mxic_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
					u64 offs, size_t len, void *buf)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(desc->mem->spi->controller);
	int ret;
	u32 sts;

	if (WARN_ON(offs + desc->info.offset + len > U32_MAX))
		return -EINVAL;

	writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0, desc->info.op_tmpl.data.swap16),
	       mxic->regs + HC_CFG);

	writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len),
	       mxic->regs + LRD_CFG);
	writel(desc->info.offset + offs, mxic->regs + LRD_ADDR);
	len = min_t(size_t, len, mxic->linear.size);
	writel(len, mxic->regs + LRD_RANGE);
	writel(LMODE_CMD0(desc->info.op_tmpl.cmd.opcode) |
	       LMODE_SLV_ACT(spi_get_chipselect(desc->mem->spi, 0)) |
	       LMODE_EN,
	       mxic->regs + LRD_CTRL);

	if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) {
		ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine,
						      NAND_PAGE_READ,
						      mxic->linear.dma + offs);
		if (ret)
			return ret;
	} else {
		memcpy_fromio(buf, mxic->linear.map, len);
	}

	writel(INT_LRD_DIS, mxic->regs + INT_STS);
	writel(0, mxic->regs + LRD_CTRL);

	ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
				 sts & INT_LRD_DIS, 0, USEC_PER_SEC);
	if (ret)
		return ret;

	return len;
}

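/* Direct-mapping write: mirrors the read path above, using the LWR_* registers. */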
static ssize_t mxic_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
					 u64 offs, size_t len,
					 const void *buf)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(desc->mem->spi->controller);
	u32 sts;
	int ret;

	if (WARN_ON(offs + desc->info.offset + len > U32_MAX))
		return -EINVAL;

	writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0, desc->info.op_tmpl.data.swap16),
	       mxic->regs + HC_CFG);

	writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len),
	       mxic->regs + LWR_CFG);
	writel(desc->info.offset + offs, mxic->regs + LWR_ADDR);
	len = min_t(size_t, len, mxic->linear.size);
	writel(len, mxic->regs + LWR_RANGE);
	writel(LMODE_CMD0(desc->info.op_tmpl.cmd.opcode) |
	       LMODE_SLV_ACT(spi_get_chipselect(desc->mem->spi, 0)) |
	       LMODE_EN,
	       mxic->regs + LWR_CTRL);

	if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) {
		ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine,
						      NAND_PAGE_WRITE,
						      mxic->linear.dma + offs);
		if (ret)
			return ret;
	} else {
		memcpy_toio(mxic->linear.map, buf, len);
	}

	writel(INT_LWR_DIS, mxic->regs + INT_STS);
	writel(0, mxic->regs + LWR_CTRL);

	ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
				 sts & INT_LWR_DIS, 0, USEC_PER_SEC);
	if (ret)
		return ret;

	return len;
}

static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
				     const struct spi_mem_op *op)
{
	if (op->data.buswidth > 8 || op->addr.buswidth > 8 ||
	    op->dummy.buswidth > 8 || op->cmd.buswidth > 8)
		return false;

	if (op->data.nbytes && op->dummy.nbytes &&
	    op->data.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes > 7)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

static int mxic_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(desc->mem->spi->controller);

	if (!mxic->linear.map)
		return -EOPNOTSUPP;

	if (desc->info.offset + desc->info.length > U32_MAX)
		return -EINVAL;

	if (!mxic_spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
		return -EOPNOTSUPP;

	return 0;
}

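/*
 * Execute a spi-mem operation in PIO mode: the chip select is asserted
 * manually, then the command, address, dummy and data phases are shifted
 * out/in through mxic_spi_data_xfer().
 */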
static int mxic_spi_mem_exec_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(mem->spi->controller);
	int i, ret;
	u8 addr[8], cmd[2];

	ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
	if (ret)
		return ret;

	writel(mxic_spi_prep_hc_cfg(mem->spi, HC_CFG_MAN_CS_EN, op->data.swap16),
	       mxic->regs + HC_CFG);

	writel(HC_EN_BIT, mxic->regs + HC_EN);

	writel(mxic_spi_mem_prep_op_cfg(op, op->data.nbytes),
	       mxic->regs + SS_CTRL(spi_get_chipselect(mem->spi, 0)));

	writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
	       mxic->regs + HC_CFG);

	for (i = 0; i < op->cmd.nbytes; i++)
		cmd[i] = op->cmd.opcode >> (8 * (op->cmd.nbytes - i - 1));

	ret = mxic_spi_data_xfer(mxic, cmd, NULL, op->cmd.nbytes);
	if (ret)
		goto out;

	for (i = 0; i < op->addr.nbytes; i++)
		addr[i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));

	ret = mxic_spi_data_xfer(mxic, addr, NULL, op->addr.nbytes);
	if (ret)
		goto out;

	ret = mxic_spi_data_xfer(mxic, NULL, NULL, op->dummy.nbytes);
	if (ret)
		goto out;

	ret = mxic_spi_data_xfer(mxic,
				 op->data.dir == SPI_MEM_DATA_OUT ?
				 op->data.buf.out : NULL,
				 op->data.dir == SPI_MEM_DATA_IN ?
				 op->data.buf.in : NULL,
				 op->data.nbytes);

out:
	writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
	       mxic->regs + HC_CFG);
	writel(0, mxic->regs + HC_EN);

	return ret;
}

static const struct spi_controller_mem_ops mxic_spi_mem_ops = {
	.supports_op = mxic_spi_mem_supports_op,
	.exec_op = mxic_spi_mem_exec_op,
	.dirmap_create = mxic_spi_mem_dirmap_create,
	.dirmap_read = mxic_spi_mem_dirmap_read,
	.dirmap_write = mxic_spi_mem_dirmap_write,
};

static const struct spi_controller_mem_caps mxic_spi_mem_caps = {
	.dtr = true,
	.ecc = true,
	.swap16 = true,
};

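/*
 * Manual chip-select control for regular SPI transfers: lvl == false
 * enables the controller and asserts the chip select, lvl == true
 * de-asserts it and disables the controller again.
 */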
static void mxic_spi_set_cs(struct spi_device *spi, bool lvl)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(spi->controller);

	if (!lvl) {
		writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_EN,
		       mxic->regs + HC_CFG);
		writel(HC_EN_BIT, mxic->regs + HC_EN);
		writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
		       mxic->regs + HC_CFG);
	} else {
		writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
		       mxic->regs + HC_CFG);
		writel(0, mxic->regs + HC_EN);
	}
}

static int mxic_spi_transfer_one(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct mxic_spi *mxic = spi_controller_get_devdata(host);
	unsigned int busw = OP_BUSW_1;
	int ret;

	if (t->rx_buf && t->tx_buf) {
		if (((spi->mode & SPI_TX_QUAD) &&
		     !(spi->mode & SPI_RX_QUAD)) ||
		    ((spi->mode & SPI_TX_DUAL) &&
		     !(spi->mode & SPI_RX_DUAL)))
			return -ENOTSUPP;
	}

	ret = mxic_spi_set_freq(mxic, t->speed_hz);
	if (ret)
		return ret;

	if (t->tx_buf) {
		if (spi->mode & SPI_TX_QUAD)
			busw = OP_BUSW_4;
		else if (spi->mode & SPI_TX_DUAL)
			busw = OP_BUSW_2;
	} else if (t->rx_buf) {
		if (spi->mode & SPI_RX_QUAD)
			busw = OP_BUSW_4;
		else if (spi->mode & SPI_RX_DUAL)
			busw = OP_BUSW_2;
	}

	writel(OP_CMD_BYTES(1) | OP_CMD_BUSW(busw) |
	       OP_DATA_BUSW(busw) | (t->rx_buf ? OP_READ : 0),
	       mxic->regs + SS_CTRL(0));

	ret = mxic_spi_data_xfer(mxic, t->tx_buf, t->rx_buf, t->len);
	if (ret)
		return ret;

	spi_finalize_current_transfer(host);

	return 0;
}

/* ECC wrapper */
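/*
 * The wrappers below forward to the pipelined ECC engine ops provided by
 * mxic_ecc_get_pipelined_ops() and record whether the pipelined
 * configuration is in use, so the dirmap accessors know to route data
 * through the ECC engine.
 */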
static int mxic_spi_mem_ecc_init_ctx(struct nand_device *nand)
{
	const struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
	struct mxic_spi *mxic = nand->ecc.engine->priv;

	mxic->ecc.use_pipelined_conf = true;

	return ops->init_ctx(nand);
}

static void mxic_spi_mem_ecc_cleanup_ctx(struct nand_device *nand)
{
	const struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
	struct mxic_spi *mxic = nand->ecc.engine->priv;

	mxic->ecc.use_pipelined_conf = false;

	ops->cleanup_ctx(nand);
}

static int mxic_spi_mem_ecc_prepare_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	const struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();

	return ops->prepare_io_req(nand, req);
}

static int mxic_spi_mem_ecc_finish_io_req(struct nand_device *nand,
					  struct nand_page_io_req *req)
{
	const struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();

	return ops->finish_io_req(nand, req);
}

static const struct nand_ecc_engine_ops mxic_spi_mem_ecc_engine_pipelined_ops = {
	.init_ctx = mxic_spi_mem_ecc_init_ctx,
	.cleanup_ctx = mxic_spi_mem_ecc_cleanup_ctx,
	.prepare_io_req = mxic_spi_mem_ecc_prepare_io_req,
	.finish_io_req = mxic_spi_mem_ecc_finish_io_req,
};

static void mxic_spi_mem_ecc_remove(struct mxic_spi *mxic)
{
	if (mxic->ecc.pipelined_engine) {
		mxic_ecc_put_pipelined_engine(mxic->ecc.pipelined_engine);
		nand_ecc_unregister_on_host_hw_engine(mxic->ecc.pipelined_engine);
	}
}

static int mxic_spi_mem_ecc_probe(struct platform_device *pdev,
				  struct mxic_spi *mxic)
{
	struct nand_ecc_engine *eng;

	if (!mxic_ecc_get_pipelined_ops())
		return -EOPNOTSUPP;

	eng = mxic_ecc_get_pipelined_engine(pdev);
	if (IS_ERR(eng))
		return PTR_ERR(eng);

	eng->dev = &pdev->dev;
	eng->integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
	eng->ops = &mxic_spi_mem_ecc_engine_pipelined_ops;
	eng->priv = mxic;
	mxic->ecc.pipelined_engine = eng;
	nand_ecc_register_on_host_hw_engine(eng);

	return 0;
}

static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mxic_spi *mxic = spi_controller_get_devdata(host);

	mxic_spi_clk_disable(mxic);
	clk_disable_unprepare(mxic->ps_clk);

	return 0;
}

static int __maybe_unused mxic_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mxic_spi *mxic = spi_controller_get_devdata(host);
	int ret;

	ret = clk_prepare_enable(mxic->ps_clk);
	if (ret) {
		dev_err(dev, "Cannot enable ps_clock.\n");
		return ret;
	}

	return mxic_spi_clk_enable(mxic);
}

static const struct dev_pm_ops mxic_spi_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(mxic_spi_runtime_suspend,
			   mxic_spi_runtime_resume, NULL)
};

static int mxic_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct resource *res;
	struct mxic_spi *mxic;
	int ret;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(struct mxic_spi));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);

	mxic = spi_controller_get_devdata(host);
	mxic->dev = &pdev->dev;

	host->dev.of_node = pdev->dev.of_node;

	mxic->ps_clk = devm_clk_get(&pdev->dev, "ps_clk");
	if (IS_ERR(mxic->ps_clk))
		return PTR_ERR(mxic->ps_clk);

	mxic->send_clk = devm_clk_get(&pdev->dev, "send_clk");
	if (IS_ERR(mxic->send_clk))
		return PTR_ERR(mxic->send_clk);

	mxic->send_dly_clk = devm_clk_get(&pdev->dev, "send_dly_clk");
	if (IS_ERR(mxic->send_dly_clk))
		return PTR_ERR(mxic->send_dly_clk);

	mxic->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(mxic->regs))
		return PTR_ERR(mxic->regs);

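	/*
	 * The "dirmap" memory resource is optional: without it only
	 * register-based PIO transfers are available and dirmap_create()
	 * fails with -EOPNOTSUPP.
	 */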
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
	mxic->linear.map = devm_ioremap_resource(&pdev->dev, res);
	if (!IS_ERR(mxic->linear.map)) {
		mxic->linear.dma = res->start;
		mxic->linear.size = resource_size(res);
	} else {
		mxic->linear.map = NULL;
	}

	pm_runtime_enable(&pdev->dev);
	host->auto_runtime_pm = true;

	host->num_chipselect = 1;
	host->mem_ops = &mxic_spi_mem_ops;
	host->mem_caps = &mxic_spi_mem_caps;

	host->set_cs = mxic_spi_set_cs;
	host->transfer_one = mxic_spi_transfer_one;
	host->bits_per_word_mask = SPI_BPW_MASK(8);
	host->mode_bits = SPI_CPOL | SPI_CPHA |
			  SPI_RX_DUAL | SPI_TX_DUAL |
			  SPI_RX_QUAD | SPI_TX_QUAD |
			  SPI_RX_OCTAL | SPI_TX_OCTAL;

	mxic_spi_hw_init(mxic);

	ret = mxic_spi_mem_ecc_probe(pdev, mxic);
	if (ret == -EPROBE_DEFER) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	ret = spi_register_controller(host);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_controller failed\n");
		pm_runtime_disable(&pdev->dev);
		mxic_spi_mem_ecc_remove(mxic);
	}

	return ret;
}

static void mxic_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct mxic_spi *mxic = spi_controller_get_devdata(host);

	pm_runtime_disable(&pdev->dev);
	mxic_spi_mem_ecc_remove(mxic);
	spi_unregister_controller(host);
}

static const struct of_device_id mxic_spi_of_ids[] = {
	{ .compatible = "mxicy,mx25f0a-spi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxic_spi_of_ids);

static struct platform_driver mxic_spi_driver = {
	.probe = mxic_spi_probe,
	.remove = mxic_spi_remove,
	.driver = {
		.name = "mxic-spi",
		.of_match_table = mxic_spi_of_ids,
		.pm = &mxic_spi_dev_pm_ops,
	},
};
module_platform_driver(mxic_spi_driver);

MODULE_AUTHOR("Mason Yang <masonccyang@mxic.com.tw>");
MODULE_DESCRIPTION("MX25F0A SPI controller driver");
MODULE_LICENSE("GPL v2");