// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2022 Jonathan Neuschäfer

#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi-mem.h>

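/* FIU register offsets; all registers are accessed as single bytes */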
#define FIU_CFG		0x00
#define FIU_BURST_BFG	0x01
#define FIU_RESP_CFG	0x02
#define FIU_CFBB_PROT	0x03
#define FIU_FWIN1_LOW	0x04
#define FIU_FWIN1_HIGH	0x06
#define FIU_FWIN2_LOW	0x08
#define FIU_FWIN2_HIGH	0x0a
#define FIU_FWIN3_LOW	0x0c
#define FIU_FWIN3_HIGH	0x0e
#define FIU_PROT_LOCK	0x10
#define FIU_PROT_CLEAR	0x11
#define FIU_SPI_FL_CFG	0x14
#define FIU_UMA_CODE	0x16
#define FIU_UMA_AB0	0x17
#define FIU_UMA_AB1	0x18
#define FIU_UMA_AB2	0x19
#define FIU_UMA_DB0	0x1a
#define FIU_UMA_DB1	0x1b
#define FIU_UMA_DB2	0x1c
#define FIU_UMA_DB3	0x1d
#define FIU_UMA_CTS	0x1e
#define FIU_UMA_ECTS	0x1f

#define FIU_BURST_CFG_R16	3

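/*
 * Fields in the FIU_UMA_CTS register: bits 0-2 hold the number of data bytes,
 * bit 3 enables the three address bytes, bit 4 selects a write transfer,
 * bits 5-6 select the chip select, and bit 7 starts the transfer and is
 * cleared by the hardware once the transfer has finished.
 */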
#define FIU_UMA_CTS_D_SIZE(x)	(x)
#define FIU_UMA_CTS_A_SIZE	BIT(3)
#define FIU_UMA_CTS_WR		BIT(4)
#define FIU_UMA_CTS_CS(x)	((x) << 5)
#define FIU_UMA_CTS_EXEC_DONE	BIT(7)

#define SHM_FLASH_SIZE	0x02
#define SHM_FLASH_SIZE_STALL_HOST BIT(6)

/*
 * I observed a typical wait time of 16 iterations for a UMA transfer to
 * finish, so this should be a safe limit.
 */
#define UMA_WAIT_ITERATIONS 100

/* The memory-mapped view of flash is 16 MiB long */
#define MAX_MEMORY_SIZE_PER_CS	(16 << 20)
#define MAX_MEMORY_SIZE_TOTAL	(4 * MAX_MEMORY_SIZE_PER_CS)

struct wpcm_fiu_spi {
	struct device *dev;
	struct clk *clk;
	void __iomem *regs;
	void __iomem *memory;
	size_t memory_size;
	struct regmap *shm_regmap;
};

static void wpcm_fiu_set_opcode(struct wpcm_fiu_spi *fiu, u8 opcode)
{
	writeb(opcode, fiu->regs + FIU_UMA_CODE);
}

static void wpcm_fiu_set_addr(struct wpcm_fiu_spi *fiu, u32 addr)
{
	writeb((addr >>  0) & 0xff, fiu->regs + FIU_UMA_AB0);
	writeb((addr >>  8) & 0xff, fiu->regs + FIU_UMA_AB1);
	writeb((addr >> 16) & 0xff, fiu->regs + FIU_UMA_AB2);
}

static void wpcm_fiu_set_data(struct wpcm_fiu_spi *fiu, const u8 *data, unsigned int nbytes)
{
	int i;

	for (i = 0; i < nbytes; i++)
		writeb(data[i], fiu->regs + FIU_UMA_DB0 + i);
}

static void wpcm_fiu_get_data(struct wpcm_fiu_spi *fiu, u8 *data, unsigned int nbytes)
{
	int i;

	for (i = 0; i < nbytes; i++)
		data[i] = readb(fiu->regs + FIU_UMA_DB0 + i);
}

/*
 * Perform a UMA (User Mode Access) operation, i.e. a software-controlled SPI transfer.
 */
static int wpcm_fiu_do_uma(struct wpcm_fiu_spi *fiu, unsigned int cs,
			   bool use_addr, bool write, int data_bytes)
{
	int i = 0;
	u8 cts = FIU_UMA_CTS_EXEC_DONE | FIU_UMA_CTS_CS(cs);

	if (use_addr)
		cts |= FIU_UMA_CTS_A_SIZE;
	if (write)
		cts |= FIU_UMA_CTS_WR;
	cts |= FIU_UMA_CTS_D_SIZE(data_bytes);

	writeb(cts, fiu->regs + FIU_UMA_CTS);

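	/* Wait for the hardware to clear EXEC_DONE, which signals completion */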
	for (i = 0; i < UMA_WAIT_ITERATIONS; i++)
		if (!(readb(fiu->regs + FIU_UMA_CTS) & FIU_UMA_CTS_EXEC_DONE))
			return 0;

	dev_info(fiu->dev, "UMA transfer has not finished in %d iterations\n", UMA_WAIT_ITERATIONS);
	return -EIO;
}

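/*
 * The ECTS register allows manual control of the chip select lines: clearing
 * a bit asserts the corresponding chip select, setting it releases it again.
 * This lets the driver chain several UMA transfers into one SPI transaction.
 */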
static void wpcm_fiu_ects_assert(struct wpcm_fiu_spi *fiu, unsigned int cs)
{
	u8 ects = readb(fiu->regs + FIU_UMA_ECTS);

	ects &= ~BIT(cs);
	writeb(ects, fiu->regs + FIU_UMA_ECTS);
}

static void wpcm_fiu_ects_deassert(struct wpcm_fiu_spi *fiu, unsigned int cs)
{
	u8 ects = readb(fiu->regs + FIU_UMA_ECTS);

	ects |= BIT(cs);
	writeb(ects, fiu->regs + FIU_UMA_ECTS);
}

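/*
 * The FIU can only perform a limited set of SPI operations through UMA, so
 * each supported operation is described as a "shape": a match callback that
 * recognizes the operation and an exec callback that carries it out.
 * supports_op() and exec_op() pick the first shape that matches.
 */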
struct wpcm_fiu_op_shape {
	bool (*match)(const struct spi_mem_op *op);
	int (*exec)(struct spi_mem *mem, const struct spi_mem_op *op);
};

static bool wpcm_fiu_normal_match(const struct spi_mem_op *op)
{
	// Opcode 0x0b (FAST READ) is treated differently in hardware
	if (op->cmd.opcode == 0x0b)
		return false;

	return (op->addr.nbytes == 0 || op->addr.nbytes == 3) &&
	       op->dummy.nbytes == 0 && op->data.nbytes <= 4;
}

static int wpcm_fiu_normal_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
	int ret;

	wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
	wpcm_fiu_set_addr(fiu, op->addr.val);
	if (op->data.dir == SPI_MEM_DATA_OUT)
		wpcm_fiu_set_data(fiu, op->data.buf.out, op->data.nbytes);

	ret = wpcm_fiu_do_uma(fiu, mem->spi->chip_select, op->addr.nbytes == 3,
			      op->data.dir == SPI_MEM_DATA_OUT, op->data.nbytes);

	if (op->data.dir == SPI_MEM_DATA_IN)
		wpcm_fiu_get_data(fiu, op->data.buf.in, op->data.nbytes);

	return ret;
}

static bool wpcm_fiu_fast_read_match(const struct spi_mem_op *op)
{
	return op->cmd.opcode == 0x0b && op->addr.nbytes == 3 &&
	       op->dummy.nbytes == 1 &&
	       op->data.nbytes >= 1 && op->data.nbytes <= 4 &&
	       op->data.dir == SPI_MEM_DATA_IN;
}

static int wpcm_fiu_fast_read_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
	return -EINVAL;
}

/*
 * 4-byte addressing: the lowest address byte is sent as the command byte of a
 * second, chained UMA transfer.
 *
 * Flash view:  [ C  A  A  A   A     D  D  D  D]
 * bytes:        13 aa bb cc  dd -> 5a a5 f0 0f
 * FIU's view:  [ C  A  A  A][ C     D  D  D  D]
 * FIU mode:    [    read   ][   read/write    ]
 */
static bool wpcm_fiu_4ba_match(const struct spi_mem_op *op)
{
	return op->addr.nbytes == 4 && op->dummy.nbytes == 0 && op->data.nbytes <= 4;
}

static int wpcm_fiu_4ba_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
	int cs = mem->spi->chip_select;

	wpcm_fiu_ects_assert(fiu, cs);

	wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
	wpcm_fiu_set_addr(fiu, op->addr.val >> 8);
	wpcm_fiu_do_uma(fiu, cs, true, false, 0);

	wpcm_fiu_set_opcode(fiu, op->addr.val & 0xff);
	wpcm_fiu_set_addr(fiu, 0);
	if (op->data.dir == SPI_MEM_DATA_OUT)
		wpcm_fiu_set_data(fiu, op->data.buf.out, op->data.nbytes);
	wpcm_fiu_do_uma(fiu, cs, false, op->data.dir == SPI_MEM_DATA_OUT, op->data.nbytes);

	wpcm_fiu_ects_deassert(fiu, cs);

	if (op->data.dir == SPI_MEM_DATA_IN)
		wpcm_fiu_get_data(fiu, op->data.buf.in, op->data.nbytes);

	return 0;
}

/*
 * RDID (Read Identification) needs special handling because Linux expects to
 * be able to read 6 ID bytes and the FIU can only read up to 4 at once.
 *
 * We're lucky in this case, because executing the RDID instruction twice
 * produces the same result both times.
 *
 * What we do is as follows (C: write command/opcode byte, D: read data byte,
 * A: write address byte):
 *
 *  1. C D D D
 *  2. C A A A D D D
 */
static bool wpcm_fiu_rdid_match(const struct spi_mem_op *op)
{
	return op->cmd.opcode == 0x9f && op->addr.nbytes == 0 &&
	       op->dummy.nbytes == 0 && op->data.nbytes == 6 &&
	       op->data.dir == SPI_MEM_DATA_IN;
}

static int wpcm_fiu_rdid_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
	int cs = mem->spi->chip_select;

	/* First transfer */
	wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
	wpcm_fiu_set_addr(fiu, 0);
	wpcm_fiu_do_uma(fiu, cs, false, false, 3);
	wpcm_fiu_get_data(fiu, op->data.buf.in, 3);

	/* Second transfer */
	wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
	wpcm_fiu_set_addr(fiu, 0);
	wpcm_fiu_do_uma(fiu, cs, true, false, 3);
	wpcm_fiu_get_data(fiu, op->data.buf.in + 3, 3);

	return 0;
}

/*
 * Operations with dummy bytes. The dummy bytes (X) are sent as don't-care
 * data bytes of a first UMA transfer; the last one is covered by the command
 * byte (0x00) of a second UMA transfer, which then reads the data.
 *
 *  C A A A  X*  X D D D D
 * [C A A A  D*][C D D D D]
 */
static bool wpcm_fiu_dummy_match(const struct spi_mem_op *op)
{
	// Opcode 0x0b (FAST READ) is treated differently in hardware
	if (op->cmd.opcode == 0x0b)
		return false;

	return (op->addr.nbytes == 0 || op->addr.nbytes == 3) &&
	       op->dummy.nbytes >= 1 && op->dummy.nbytes <= 5 &&
	       op->data.nbytes <= 4;
}

static int wpcm_fiu_dummy_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
	int cs = mem->spi->chip_select;

	wpcm_fiu_ects_assert(fiu, cs);

	/* First transfer */
	wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
	wpcm_fiu_set_addr(fiu, op->addr.val);
	wpcm_fiu_do_uma(fiu, cs, op->addr.nbytes != 0, true, op->dummy.nbytes - 1);

	/* Second transfer */
	wpcm_fiu_set_opcode(fiu, 0);
	wpcm_fiu_set_addr(fiu, 0);
	wpcm_fiu_do_uma(fiu, cs, false, false, op->data.nbytes);
	wpcm_fiu_get_data(fiu, op->data.buf.in, op->data.nbytes);

	wpcm_fiu_ects_deassert(fiu, cs);

	return 0;
}

static const struct wpcm_fiu_op_shape wpcm_fiu_op_shapes[] = {
	{ .match = wpcm_fiu_normal_match, .exec = wpcm_fiu_normal_exec },
	{ .match = wpcm_fiu_fast_read_match, .exec = wpcm_fiu_fast_read_exec },
	{ .match = wpcm_fiu_4ba_match, .exec = wpcm_fiu_4ba_exec },
	{ .match = wpcm_fiu_rdid_match, .exec = wpcm_fiu_rdid_exec },
	{ .match = wpcm_fiu_dummy_match, .exec = wpcm_fiu_dummy_exec },
};

static const struct wpcm_fiu_op_shape *wpcm_fiu_find_op_shape(const struct spi_mem_op *op)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(wpcm_fiu_op_shapes); i++) {
		const struct wpcm_fiu_op_shape *shape = &wpcm_fiu_op_shapes[i];

		if (shape->match(op))
			return shape;
	}

	return NULL;
}

static bool wpcm_fiu_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;

	if (op->cmd.buswidth > 1 || op->addr.buswidth > 1 ||
	    op->dummy.buswidth > 1 || op->data.buswidth > 1)
		return false;

	return wpcm_fiu_find_op_shape(op) != NULL;
}

/*
 * In order to ensure the integrity of SPI transfers performed via UMA,
 * temporarily disable (stall) memory accesses coming from the host CPU.
 */
static void wpcm_fiu_stall_host(struct wpcm_fiu_spi *fiu, bool stall)
{
	if (fiu->shm_regmap) {
		int res = regmap_update_bits(fiu->shm_regmap, SHM_FLASH_SIZE,
					     SHM_FLASH_SIZE_STALL_HOST,
					     stall ? SHM_FLASH_SIZE_STALL_HOST : 0);
		if (res)
			dev_warn(fiu->dev, "Failed to (un)stall host memory accesses: %d\n", res);
	}
}

static int wpcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
	const struct wpcm_fiu_op_shape *shape = wpcm_fiu_find_op_shape(op);
	int ret = -ENOTSUPP;

	wpcm_fiu_stall_host(fiu, true);

	if (shape)
		ret = shape->exec(mem, op);

	/* Always unstall the host again, even after a successful operation */
	wpcm_fiu_stall_host(fiu, false);

	return ret;
}

static int wpcm_fiu_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.nbytes > 4)
		op->data.nbytes = 4;

	return 0;
}

static int wpcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(desc->mem->spi->controller);
	int cs = desc->mem->spi->chip_select;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -ENOTSUPP;

	/*
	 * Unfortunately, FIU only supports a 16 MiB direct mapping window (per
	 * attached flash chip), but the SPI MEM core doesn't support partial
	 * direct mappings. This means that we can't support direct mapping on
	 * flashes that are bigger than 16 MiB.
	 */
	if (desc->info.offset + desc->info.length > MAX_MEMORY_SIZE_PER_CS)
		return -ENOTSUPP;

	/* Don't read past the memory window */
	if (cs * MAX_MEMORY_SIZE_PER_CS + desc->info.offset + desc->info.length > fiu->memory_size)
		return -ENOTSUPP;

	return 0;
}

static ssize_t wpcm_fiu_direct_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf)
{
	struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(desc->mem->spi->controller);
	int cs = desc->mem->spi->chip_select;

	if (offs >= MAX_MEMORY_SIZE_PER_CS)
		return -ENOTSUPP;

	offs += cs * MAX_MEMORY_SIZE_PER_CS;

	if (!fiu->memory || offs >= fiu->memory_size)
		return -ENOTSUPP;

	len = min_t(size_t, len, fiu->memory_size - offs);
	memcpy_fromio(buf, fiu->memory + offs, len);

	return len;
}

static const struct spi_controller_mem_ops wpcm_fiu_mem_ops = {
	.adjust_op_size = wpcm_fiu_adjust_op_size,
	.supports_op = wpcm_fiu_supports_op,
	.exec_op = wpcm_fiu_exec_op,
	.dirmap_create = wpcm_fiu_dirmap_create,
	.dirmap_read = wpcm_fiu_direct_read,
};

static void wpcm_fiu_hw_init(struct wpcm_fiu_spi *fiu)
{
	/* Configure memory-mapped flash access */
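	/* FIU_CFG and FIU_SPI_FL_CFG take the flash size in units of 512 KiB */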
	writeb(FIU_BURST_CFG_R16, fiu->regs + FIU_BURST_BFG);
	writeb(MAX_MEMORY_SIZE_TOTAL / (512 << 10), fiu->regs + FIU_CFG);
	writeb(MAX_MEMORY_SIZE_PER_CS / (512 << 10) | BIT(6), fiu->regs + FIU_SPI_FL_CFG);

	/* Deassert all manually asserted chip selects */
	writeb(0x0f, fiu->regs + FIU_UMA_ECTS);
}

static int wpcm_fiu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *ctrl;
	struct wpcm_fiu_spi *fiu;
	struct resource *res;

	ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
	if (!ctrl)
		return -ENOMEM;

	fiu = spi_controller_get_devdata(ctrl);
	fiu->dev = dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
	fiu->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(fiu->regs)) {
		dev_err(dev, "Failed to map registers\n");
		return PTR_ERR(fiu->regs);
	}

	fiu->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(fiu->clk))
		return PTR_ERR(fiu->clk);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory");
	fiu->memory = devm_ioremap_resource(dev, res);
	if (IS_ERR(fiu->memory)) {
		dev_err(dev, "Failed to map flash memory window\n");
		return PTR_ERR(fiu->memory);
	}
	fiu->memory_size = min_t(size_t, resource_size(res), MAX_MEMORY_SIZE_TOTAL);

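	/* The SHM regmap is optional; without it, host stalling is skipped */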
	fiu->shm_regmap = syscon_regmap_lookup_by_phandle_optional(dev->of_node, "nuvoton,shm");

	wpcm_fiu_hw_init(fiu);

	ctrl->bus_num = -1;
	ctrl->mem_ops = &wpcm_fiu_mem_ops;
	ctrl->num_chipselect = 4;
	ctrl->dev.of_node = dev->of_node;

	/*
	 * The FIU doesn't include a clock divider; the SPI clock is determined
	 * entirely by the AHB3 bus clock.
	 */
	ctrl->min_speed_hz = clk_get_rate(fiu->clk);
	ctrl->max_speed_hz = clk_get_rate(fiu->clk);

	return devm_spi_register_controller(dev, ctrl);
}

static const struct of_device_id wpcm_fiu_dt_ids[] = {
	{ .compatible = "nuvoton,wpcm450-fiu", },
	{ }
};
MODULE_DEVICE_TABLE(of, wpcm_fiu_dt_ids);

static struct platform_driver wpcm_fiu_driver = {
	.driver = {
		.name	= "wpcm450-fiu",
		.of_match_table = wpcm_fiu_dt_ids,
	},
	.probe      = wpcm_fiu_probe,
};
module_platform_driver(wpcm_fiu_driver);

MODULE_DESCRIPTION("Nuvoton WPCM450 FIU SPI controller driver");
MODULE_AUTHOR("Jonathan Neuschäfer <j.neuschaefer@gmx.net>");
MODULE_LICENSE("GPL");