// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/sched/task_stack.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
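
/*
 * Usage sketch (illustrative, not part of the original file): a controller
 * driver's ->exec_op() hook might bracket its DMA transfer with the two
 * helpers above. my_ctlr_exec_op() and my_ctlr_do_dma() are hypothetical.
 *
 *	static int my_ctlr_exec_op(struct spi_mem *mem,
 *				   const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = my_ctlr_do_dma(ctlr, op, &sgt);	// hypothetical DMA kick-off
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */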

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	bool op_is_dtr =
		op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;

	if (op_is_dtr) {
		if (!spi_mem_controller_is_capable(ctlr, dtr))
			return false;

		if (op->cmd.nbytes != 2)
			return false;
	} else {
		if (op->cmd.nbytes != 1)
			return false;
	}

	if (op->data.ecc) {
		if (!spi_mem_controller_is_capable(ctlr, ecc))
			return false;
	}

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
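
/*
 * Usage sketch (illustrative, not part of the original file): a controller
 * that handles everything except, say, octal transfers can filter in its own
 * ->supports_op() hook and delegate the generic checks to the helper above.
 * my_ctlr_supports_op() is hypothetical.
 *
 *	static bool my_ctlr_supports_op(struct spi_mem *mem,
 *					const struct spi_mem_op *op)
 *	{
 *		if (op->data.buswidth > 4)
 *			return false;
 *
 *		return spi_mem_default_supports_op(mem, op);
 *	}
 */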

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	/* Buffers must be DMA-able. */
	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
			 object_is_on_stack(op->data.buf.in)))
		return -EINVAL;

	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
			 object_is_on_stack(op->data.buf.out)))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual I/Os, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad I/Os but the hardware prevents you from using them
 * because only 2 I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
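
/*
 * Usage sketch (illustrative, not part of the original file): a flash driver
 * can probe for a faster read operation before settling on an opcode. The
 * SPI_MEM_OP() composition follows <linux/spi/spi-mem.h>; the 0x6b "quad
 * output read" opcode is just an example, and len/buf are placeholders.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		// fall back to a slower single-I/O read
 */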

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_resume_and_get(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		xfers[xferpos].dummy_data = 1;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
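
/*
 * Usage sketch (illustrative, not part of the original file): reading a
 * flash JEDEC ID (opcode 0x9f) through spi_mem_exec_op(). The buffer must
 * not live on the stack (see spi_mem_check_op()), hence the kmalloc().
 *
 *	u8 *id = kmalloc(3, GFP_KERNEL);
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(3, id, 1));
 *
 *	ret = spi_mem_exec_op(mem, &op);
 */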

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
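
/*
 * Usage sketch (illustrative, not part of the original file): a typical read
 * loop first lets the controller clamp op.data.nbytes, then executes the
 * possibly shortened operation and advances by what was actually
 * transferred. It assumes op was initialized from a read template as in the
 * earlier examples; this is essentially what spi_mem_no_dirmap_read() below
 * does for a single chunk.
 *
 *	while (len) {
 *		op.addr.val = from;
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		from += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */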

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on its own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address cycles is between 1 and 8 bytes. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
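
/*
 * Usage sketch (illustrative, not part of the original file): creating a
 * read direct mapping covering a hypothetical 16 MiB flash, reusing the
 * quad output read template from the earlier examples. The data length and
 * buffer in the template are left empty; they are filled per access.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */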

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
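
/*
 * Usage sketch (illustrative, not part of the original file): since
 * spi_mem_dirmap_read() may return less than @len, callers are expected to
 * loop until the whole range has been transferred; the same pattern applies
 * to spi_mem_dirmap_write().
 *
 *	while (len) {
 *		ssize_t ret = spi_mem_dirmap_read(desc, offs, len, buf);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (!ret)
 *			return -EIO;	// no forward progress
 *
 *		offs += ret;
 *		buf += ret;
 *		len -= ret;
 *	}
 */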

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_read_status(struct spi_mem *mem,
			       const struct spi_mem_op *op,
			       u16 *status)
{
	const u8 *bytes = (u8 *)op->data.buf.in;
	int ret;

	ret = spi_mem_exec_op(mem, op);
	if (ret)
		return ret;

	if (op->data.nbytes > 1)
		*status = ((u16)bytes[0] << 8) | bytes[1];
	else
		*status = bytes[0];

	return 0;
}

/**
 * spi_mem_poll_status() - Poll memory device status
 * @mem: SPI memory device
 * @op: the memory operation to execute
 * @mask: status bitmask to check
 * @match: (status & mask) expected value
 * @initial_delay_us: delay in us before starting to poll
 * @polling_delay_us: time to sleep between reads in us
 * @timeout_ms: timeout in milliseconds
 *
 * This function polls a status register and returns when
 * (status & mask) == match or when the timeout has expired.
 *
 * Return: 0 in case of success, -ETIMEDOUT in case of error,
 *	   -EOPNOTSUPP if not supported.
 */
int spi_mem_poll_status(struct spi_mem *mem,
			const struct spi_mem_op *op,
			u16 mask, u16 match,
			unsigned long initial_delay_us,
			unsigned long polling_delay_us,
			u16 timeout_ms)
{
	struct spi_controller *ctlr = mem->spi->controller;
	int ret = -EOPNOTSUPP;
	int read_status_ret;
	u16 status;

	if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
						 initial_delay_us, polling_delay_us,
						 timeout_ms);

		spi_mem_access_end(mem);
	}

	if (ret == -EOPNOTSUPP) {
		if (!spi_mem_supports_op(mem, op))
			return ret;

		if (initial_delay_us < 10)
			udelay(initial_delay_us);
		else
			usleep_range((initial_delay_us >> 2) + 1,
				     initial_delay_us);

		ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
					(read_status_ret || ((status) & mask) == match),
					polling_delay_us, timeout_ms * 1000, false, mem,
					op, &status);
		if (read_status_ret)
			return read_status_ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
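
/*
 * Usage sketch (illustrative, not part of the original file): waiting for a
 * flash to finish a program operation by polling a status register (opcode
 * 0x05 here, with the busy bit assumed at BIT(0); both are examples, and
 * status_buf is a hypothetical DMA-able buffer). This polls with a 10 us
 * initial delay, a 100 us polling period, and a 1000 ms timeout.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(1, status_buf, 1));
 *
 *	ret = spi_mem_poll_status(mem, &op, BIT(0), 0, 10, 100, 1000);
 */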

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR_OR_ZERO(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static void spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		memdrv->remove(mem);
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
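
/*
 * Usage sketch (illustrative, not part of the original file): a minimal SPI
 * memory driver skeleton. my_flash_probe() and the "vendor,my-flash"
 * compatible string are hypothetical; module_spi_mem_driver() comes from
 * <linux/spi/spi-mem.h> and expands to the register/unregister calls above.
 *
 *	static int my_flash_probe(struct spi_mem *mem)
 *	{
 *		// detect the chip, register an mtd device, ...
 *		return 0;
 *	}
 *
 *	static const struct of_device_id my_flash_of_ids[] = {
 *		{ .compatible = "vendor,my-flash" },
 *		{ }
 *	};
 *
 *	static struct spi_mem_driver my_flash_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "my-flash",
 *				.of_match_table = my_flash_of_ids,
 *			},
 *		},
 *		.probe = my_flash_probe,
 *	};
 *	module_spi_mem_driver(my_flash_driver);
 */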