/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_spi.h>
#include <linux/pm_runtime.h>

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);

	return sprintf(buf, "%s\n", spi->modalias);
}

static struct device_attribute spi_dev_attrs[] = {
	__ATTR_RO(modalias),
	__ATTR_NULL,
};

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int value = 0;
	struct spi_driver *drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int value = 0;
	struct spi_driver *drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif

static const struct dev_pm_ops spi_pm = {
	.suspend = spi_pm_suspend,
	.resume = spi_pm_resume,
	.freeze = spi_pm_freeze,
	.thaw = spi_pm_thaw,
	.poweroff = spi_pm_poweroff,
	.restore = spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		pm_generic_runtime_idle
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_attrs	= spi_dev_attrs,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	return sdrv->probe(to_spi_device(dev));
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	return sdrv->remove(to_spi_device(dev));
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
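
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * protocol driver and its registration.  The "example" names and module
 * init boilerplate are hypothetical.
 *
 *	static int example_probe(struct spi_device *spi)
 *	{
 *		return 0;	// set up the chip; I/O via spi_sync() etc.
 *	}
 *
 *	static int example_remove(struct spi_device *spi)
 *	{
 *		return 0;	// quiesce the chip, free driver state
 *	}
 *
 *	static struct spi_driver example_driver = {
 *		.driver = {
 *			.name	= "example",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= example_probe,
 *		.remove	= example_remove,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return spi_register_driver(&example_driver);
 *	}
 *	module_init(example_init);
 */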

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into something like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device *spi;
	struct device *dev = master->dev.parent;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof *spi, GFP_KERNEL);
	if (!spi) {
		dev_err(dev, "cannot alloc spi_device\n");
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct device *dev = spi->master->dev.parent;
	struct device *d;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= spi->master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			spi->master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
			spi->chip_select);


	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
	if (d != NULL) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		put_device(d);
		status = -EBUSY;
		goto done;
	}

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
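
/*
 * Example (illustrative sketch, not part of the original file): allocating,
 * filling in, and adding a device by hand.  The modalias and the settings
 * shown are hypothetical.
 *
 *	struct spi_device *spi;
 *	int status;
 *
 *	spi = spi_alloc_device(master);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	strlcpy(spi->modalias, "example-chip", sizeof(spi->modalias));
 *	spi->chip_select = 1;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_0;
 *
 *	status = spi_add_device(spi);
 *	if (status < 0)
 *		spi_dev_put(spi);	// discard; it was never registered
 */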

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int __init
spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
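
/*
 * Example (illustrative sketch, not part of the original file): a board file
 * declaring one hard-wired device at arch_initcall() time.  The modalias and
 * the numbers are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "example-chip",
 *			.max_speed_hz	= 2000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */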

/*-------------------------------------------------------------------------*/

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};


/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master *master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof *master, GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device *dev = master->dev.parent;
	struct boardinfo *bi;
	int status = -ENODEV;
	int dynamic = 0;

	if (!dev)
		return -ENODEV;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	status = 0;

	/* Register devices from the device tree */
	of_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
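
/*
 * Example (illustrative sketch, not part of the original file): the tail of
 * a hypothetical controller driver's platform probe(), pairing
 * spi_alloc_master() with spi_register_master().  "example_hw" and the
 * method names are made up.
 *
 *	struct spi_master *master;
 *	struct example_hw *hw;
 *	int status;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*hw));
 *	if (!master)
 *		return -ENOMEM;
 *
 *	master->bus_num = pdev->id;		// or -1 for a dynamic ID
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	master->setup = example_setup;
 *	master->transfer = example_transfer;
 *	master->cleanup = example_cleanup;
 *
 *	hw = spi_master_get_devdata(master);	// the zeroed private area
 *
 *	status = spi_register_master(master);
 *	if (status < 0)
 *		spi_master_put(master);		// required on failure
 *	return status;
 */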


static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

static int __spi_master_match(struct device *dev, void *data)
{
	struct spi_master *m;
	u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device *dev;
	struct spi_master *master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
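
/*
 * Example (illustrative sketch, not part of the original file): looking up
 * bus 2 and dropping the reference that the lookup took.
 *
 *	struct spi_master *master = spi_busnum_to_master(2);
 *
 *	if (master) {
 *		// ... use it, e.g. as the parent for spi_new_device() ...
 *		spi_master_put(master);
 *	}
 */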


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits;
	int status;

	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
				"%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
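
/*
 * Example (illustrative sketch, not part of the original file): a protocol
 * driver's probe() adjusting the default I/O settings; the values shown are
 * hypothetical.
 *
 *	spi->mode = SPI_MODE_3;		// CPOL=1, CPHA=1
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 2000000;
 *
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;		// e.g. controller lacks 16-bit words
 */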

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		struct spi_transfer *xfer;
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	message->spi = spi;
	message->status = -EINPROGRESS;
	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
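
/*
 * Example (illustrative sketch, not part of the original file): submitting
 * one transfer and waiting on a completion signalled from the callback.
 * Buffers must be DMA-safe; "tx" and the other names are hypothetical.
 *
 *	static void example_complete(void *context)
 *	{
 *		complete(context);	// runs in a context that can't sleep
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct spi_transfer xfer = {
 *		.tx_buf	= tx,
 *		.len	= sizeof(tx),
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	msg.complete = example_complete;
 *	msg.context = &done;
 *
 *	status = spi_async(spi, &msg);	// returns before the I/O finishes
 *	if (status == 0) {
 *		wait_for_completion(&done);
 *		status = msg.status;
 *	}
 */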

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;

	message->complete = spi_complete;
	message->context = &done;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	status = spi_async_locked(spi, message);

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
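
/*
 * Example (illustrative sketch, not part of the original file): a blocking
 * two-transfer message; the command byte and buffer sizes are hypothetical.
 *
 *	u8 cmd = 0x03;
 *	u8 data[16];
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = &cmd, .len = 1,            },
 *		{ .rx_buf = data, .len = sizeof(data), },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfers[0], &msg);
 *	spi_message_add_tail(&xfers[1], &msg);
 *
 *	status = spi_sync(spi, &msg);	// sleeps until the message completes
 */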

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
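
/*
 * Example (illustrative sketch, not part of the original file): keeping the
 * bus exclusively between two related messages; msg1/msg2 are hypothetical.
 *
 *	spi_bus_lock(spi->master);
 *
 *	// only the *_locked() transfer calls may be used while locked
 *	status = spi_sync_locked(spi, &msg1);
 *	if (status == 0)
 *		status = spi_sync_locked(spi, &msg2);
 *
 *	spi_bus_unlock(spi->master);
 */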

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/* Use preallocated DMA-safe buffer.  We can't avoid copying here,
	 * (as a pure convenience thing), but we can keep heap costs
	 * out of the hot path ...
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ)
		return -EINVAL;

	spi_message_init(&message);
	memset(x, 0, sizeof x);
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	/* ... unless someone else is using the pre-allocated buffer */
	if (!mutex_trylock(&lock)) {
		local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
		if (!local_buf)
			return -ENOMEM;
	} else
		local_buf = buf;

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
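
/*
 * Example (illustrative sketch, not part of the original file): a short
 * command/response exchange; the opcode is hypothetical.
 *
 *	u8 cmd = 0x9f;		// e.g. a "read ID" opcode
 *	u8 id[3];
 *	int status;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 *	if (status < 0)
 *		return status;
 */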

/*-------------------------------------------------------------------------*/

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;
	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);

// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
				   offsetof(struct spi_statistics, field)); \
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

const struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
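
/*
 * Example (illustrative sketch, not part of the original file): pairing an
 * of_match_table with a spi_device_id table so DT-based module autoloading
 * works and the warning above stays quiet.  All names are hypothetical.
 *
 *	static const struct spi_device_id example_spi_ids[] = {
 *		{ "example-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, example_spi_ids);
 *
 *	static const struct of_device_id example_of_match[] = {
 *		{ .compatible = "vendor,example-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, example_of_match);
 *
 *	static struct spi_driver example_driver = {
 *		.driver = {
 *			.name		= "example-chip",
 *			.of_match_table	= example_of_match,
 *		},
 *		.id_table	= example_spi_ids,
 *		.probe		= example_probe,
 *	};
 *	module_spi_driver(example_driver);
 */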

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into something like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to protect
 * the struct idr object (spi_master_idr).
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

/*
 * Zero (0) is a valid physical CS value and can be located at any
 * logical CS in the spi->chip_select[].  If all the physical CS
 * are initialized to 0 then it would be difficult to differentiate
 * between a valid physical CS 0 and an unused logical CS whose physical
 * CS can be 0.  As a solution to this issue, initialize all the CS to -1.
 * Now all the unused logical CS will have a -1 physical CS value and can be
 * ignored while performing physical CS validity checks.
 */
#define SPI_INVALID_CS		((s8)-1)

static inline bool is_valid_cs(s8 chip_select)
{
	return chip_select != SPI_INVALID_CS;
}
626
627static inline int spi_dev_check_cs(struct device *dev,
628 struct spi_device *spi, u8 idx,
629 struct spi_device *new_spi, u8 new_idx)
630{
631 u8 cs, cs_new;
632 u8 idx_new;
633
634 cs = spi_get_chipselect(spi, idx);
635 for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
636 cs_new = spi_get_chipselect(new_spi, idx_new);
637 if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
638 dev_err(dev, "chipselect %u already in use\n", cs_new);
639 return -EBUSY;
640 }
641 }
642 return 0;
643}
644
645static int spi_dev_check(struct device *dev, void *data)
646{
647 struct spi_device *spi = to_spi_device(dev);
648 struct spi_device *new_spi = data;
649 int status, idx;
650
651 if (spi->controller == new_spi->controller) {
652 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
653 status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
654 if (status)
655 return status;
656 }
657 }
658 return 0;
659}
660
661static void spi_cleanup(struct spi_device *spi)
662{
663 if (spi->controller->cleanup)
664 spi->controller->cleanup(spi);
665}
666
667static int __spi_add_device(struct spi_device *spi)
668{
669 struct spi_controller *ctlr = spi->controller;
670 struct device *dev = ctlr->dev.parent;
671 int status, idx;
672 u8 cs;
673
674 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
675 /* Chipselects are numbered 0..max; validate. */
676 cs = spi_get_chipselect(spi, idx);
677 if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
678			dev_err(dev, "cs%d >= max %d\n", cs,
679				ctlr->num_chipselect);
680 return -EINVAL;
681 }
682 }
683
684 /*
685	 * Make sure that multiple logical CS don't map to the same physical CS.
686	 * For example, spi->chip_select[0] != spi->chip_select[1], and so on.
687 */
688 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
689 status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
690 if (status)
691 return status;
692 }
693
694 /* Set the bus ID string */
695 spi_dev_set_name(spi);
696
697 /*
698 * We need to make sure there's no other device with this
699 * chipselect **BEFORE** we call setup(), else we'll trash
700 * its configuration.
701 */
702 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
703 if (status)
704 return status;
705
706 /* Controller may unregister concurrently */
707 if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
708 !device_is_registered(&ctlr->dev)) {
709 return -ENODEV;
710 }
711
712 if (ctlr->cs_gpiods) {
713 u8 cs;
714
715 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
716 cs = spi_get_chipselect(spi, idx);
717 if (is_valid_cs(cs))
718 spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
719 }
720 }
721
722 /*
723 * Drivers may modify this initial i/o setup, but will
724 * normally rely on the device being setup. Devices
725 * using SPI_CS_HIGH can't coexist well otherwise...
726 */
727 status = spi_setup(spi);
728 if (status < 0) {
729 dev_err(dev, "can't setup %s, status %d\n",
730 dev_name(&spi->dev), status);
731 return status;
732 }
733
734 /* Device may be bound to an active driver when this returns */
735 status = device_add(&spi->dev);
736 if (status < 0) {
737 dev_err(dev, "can't add %s, status %d\n",
738 dev_name(&spi->dev), status);
739 spi_cleanup(spi);
740 } else {
741 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
742 }
743
744 return status;
745}
746
747/**
748 * spi_add_device - Add spi_device allocated with spi_alloc_device
749 * @spi: spi_device to register
750 *
751 * Companion function to spi_alloc_device. Devices allocated with
752 * spi_alloc_device can be added onto the SPI bus with this function.
753 *
754 * Return: 0 on success; negative errno on failure
755 */
756int spi_add_device(struct spi_device *spi)
757{
758 struct spi_controller *ctlr = spi->controller;
759 int status;
760
764 mutex_lock(&ctlr->add_lock);
765 status = __spi_add_device(spi);
766 mutex_unlock(&ctlr->add_lock);
767 return status;
768}
769EXPORT_SYMBOL_GPL(spi_add_device);
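
/*
 * Example: a minimal sketch of the spi_alloc_device()/spi_add_device()
 * flow for a hypothetical adapter driver; the modalias "my-sensor",
 * chip select 2 and the speed below are illustrative, not taken from
 * any real board:
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	spi_set_all_cs_unused(spi);
 *	spi_set_chipselect(spi, 0, 2);
 *	spi->cs_index_mask = BIT(0);
 *	spi->max_speed_hz = 1000000;
 *	strscpy(spi->modalias, "my-sensor", sizeof(spi->modalias));
 *
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 *
 * On spi_add_device() failure the device was never added, so the only
 * cleanup needed is dropping the reference with spi_dev_put().
 */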
770
771static void spi_set_all_cs_unused(struct spi_device *spi)
772{
773 u8 idx;
774
775 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
776 spi_set_chipselect(spi, idx, SPI_INVALID_CS);
777}
778
779/**
780 * spi_new_device - instantiate one new SPI device
781 * @ctlr: Controller to which device is connected
782 * @chip: Describes the SPI device
783 * Context: can sleep
784 *
785 * On typical mainboards, this is purely internal; and it's not needed
786 * after board init creates the hard-wired devices. Some development
787 * platforms may not be able to use spi_register_board_info though, and
788 * this is exported so that for example a USB or parport based adapter
789 * driver could add devices (which it would learn about out-of-band).
790 *
791 * Return: the new device, or NULL.
792 */
793struct spi_device *spi_new_device(struct spi_controller *ctlr,
794 struct spi_board_info *chip)
795{
796 struct spi_device *proxy;
797 int status;
798
799 /*
800 * NOTE: caller did any chip->bus_num checks necessary.
801 *
802 * Also, unless we change the return value convention to use
803 * error-or-pointer (not NULL-or-pointer), troubleshootability
804 * suggests syslogged diagnostics are best here (ugh).
805 */
806
807 proxy = spi_alloc_device(ctlr);
808 if (!proxy)
809 return NULL;
810
811 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
812
813 /* Use provided chip-select for proxy device */
814 spi_set_all_cs_unused(proxy);
815 spi_set_chipselect(proxy, 0, chip->chip_select);
816
817 proxy->max_speed_hz = chip->max_speed_hz;
818 proxy->mode = chip->mode;
819 proxy->irq = chip->irq;
820 strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
821 proxy->dev.platform_data = (void *) chip->platform_data;
822 proxy->controller_data = chip->controller_data;
823 proxy->controller_state = NULL;
824 /*
825	 * spi->chip_select[i] gives the corresponding physical CS for logical CS i.
826	 * A logical CS is represented by setting the ith bit in spi->cs_index_mask.
827	 * So, for example, if spi->cs_index_mask = 0x01, the logical CS number is 0
828	 * and spi->chip_select[0] gives the physical CS.
829	 * By default spi->chip_select[0] holds the physical CS number, so set
830	 * spi->cs_index_mask to 0x01.
831 */
832 proxy->cs_index_mask = 0x01;
833
834 if (chip->swnode) {
835 status = device_add_software_node(&proxy->dev, chip->swnode);
836 if (status) {
837 dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
838 chip->modalias, status);
839 goto err_dev_put;
840 }
841 }
842
843 status = spi_add_device(proxy);
844 if (status < 0)
845 goto err_dev_put;
846
847 return proxy;
848
849err_dev_put:
850 device_remove_software_node(&proxy->dev);
851 spi_dev_put(proxy);
852 return NULL;
853}
854EXPORT_SYMBOL_GPL(spi_new_device);
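
/*
 * Example: how a USB- or parport-based adapter driver might use
 * spi_new_device(); the descriptor below is hypothetical, such drivers
 * normally learn these values out-of-band:
 *
 *	static struct spi_board_info chip = {
 *		.modalias	= "fake-eeprom",
 *		.max_speed_hz	= 500000,
 *		.mode		= SPI_MODE_0,
 *		.chip_select	= 1,
 *	};
 *
 *	struct spi_device *dev = spi_new_device(ctlr, &chip);
 *
 *	if (!dev)
 *		dev_err(&ctlr->dev, "cannot instantiate fake-eeprom\n");
 */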
855
856/**
857 * spi_unregister_device - unregister a single SPI device
858 * @spi: spi_device to unregister
859 *
860 * Start making the passed SPI device vanish. Normally this would be handled
861 * by spi_unregister_controller().
862 */
863void spi_unregister_device(struct spi_device *spi)
864{
865 if (!spi)
866 return;
867
868 if (spi->dev.of_node) {
869 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
870 of_node_put(spi->dev.of_node);
871 }
872 if (ACPI_COMPANION(&spi->dev))
873 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
874 device_remove_software_node(&spi->dev);
875 device_del(&spi->dev);
876 spi_cleanup(spi);
877 put_device(&spi->dev);
878}
879EXPORT_SYMBOL_GPL(spi_unregister_device);
880
881static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
882 struct spi_board_info *bi)
883{
884 struct spi_device *dev;
885
886 if (ctlr->bus_num != bi->bus_num)
887 return;
888
889 dev = spi_new_device(ctlr, bi);
890 if (!dev)
891 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
892 bi->modalias);
893}
894
895/**
896 * spi_register_board_info - register SPI devices for a given board
897 * @info: array of chip descriptors
898 * @n: how many descriptors are provided
899 * Context: can sleep
900 *
901 * Board-specific early init code calls this (probably during arch_initcall)
902 * with segments of the SPI device table. Any device nodes are created later,
903 * after the relevant parent SPI controller (bus_num) is defined. We keep
904 * this table of devices forever, so that reloading a controller driver will
905 * not make Linux forget about these hard-wired devices.
906 *
907 * Other code can also call this, e.g. a particular add-on board might provide
908 * SPI devices through its expansion connector, so code initializing that board
909 * would naturally declare its SPI devices.
910 *
911 * The board info passed can safely be __initdata ... but be careful of
912 * any embedded pointers (platform_data, etc), they're copied as-is.
913 *
914 * Return: zero on success, else a negative error code.
915 */
916int spi_register_board_info(struct spi_board_info const *info, unsigned n)
917{
918 struct boardinfo *bi;
919 int i;
920
921 if (!n)
922 return 0;
923
924 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
925 if (!bi)
926 return -ENOMEM;
927
928 for (i = 0; i < n; i++, bi++, info++) {
929 struct spi_controller *ctlr;
930
931 memcpy(&bi->board_info, info, sizeof(*info));
932
933 mutex_lock(&board_lock);
934 list_add_tail(&bi->list, &board_list);
935 list_for_each_entry(ctlr, &spi_controller_list, list)
936 spi_match_controller_to_boardinfo(ctlr,
937 &bi->board_info);
938 mutex_unlock(&board_lock);
939 }
940
941 return 0;
942}
943
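/*
 * Example: typical board-file usage, with made-up values; the table is
 * registered early (arch_initcall time) and matched against controllers
 * as they register:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "fake-codec",
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 2000000,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */
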
944/*-------------------------------------------------------------------------*/
945
946/* Core methods for SPI resource management */
947
948/**
949 * spi_res_alloc - allocate a spi resource that is life-cycle managed
950 * during the processing of a spi_message while using
951 * spi_transfer_one
952 * @spi: the SPI device for which we allocate memory
953 * @release: the release code to execute for this resource
954 * @size: size to alloc and return
955 * @gfp: GFP allocation flags
956 *
957 * Return: the pointer to the allocated data
958 *
959 * This may get enhanced in the future to allocate from a memory pool
960 * of the @spi_device or @spi_controller to avoid repeated allocations.
961 */
962static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
963 size_t size, gfp_t gfp)
964{
965 struct spi_res *sres;
966
967 sres = kzalloc(sizeof(*sres) + size, gfp);
968 if (!sres)
969 return NULL;
970
971 INIT_LIST_HEAD(&sres->entry);
972 sres->release = release;
973
974 return sres->data;
975}
976
977/**
978 * spi_res_free - free an SPI resource
979 * @res: pointer to the custom data of a resource
980 */
981static void spi_res_free(void *res)
982{
983 struct spi_res *sres = container_of(res, struct spi_res, data);
984
985 if (!res)
986 return;
987
988 WARN_ON(!list_empty(&sres->entry));
989 kfree(sres);
990}
991
992/**
993 * spi_res_add - add a spi_res to the spi_message
994 * @message: the SPI message
995 * @res: the spi_resource
996 */
997static void spi_res_add(struct spi_message *message, void *res)
998{
999 struct spi_res *sres = container_of(res, struct spi_res, data);
1000
1001 WARN_ON(!list_empty(&sres->entry));
1002 list_add_tail(&sres->entry, &message->resources);
1003}
1004
1005/**
1006 * spi_res_release - release all SPI resources for this message
1007 * @ctlr: the @spi_controller
1008 * @message: the @spi_message
1009 */
1010static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
1011{
1012 struct spi_res *res, *tmp;
1013
1014 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
1015 if (res->release)
1016 res->release(ctlr, message, res->data);
1017
1018 list_del(&res->entry);
1019
1020 kfree(res);
1021 }
1022}
1023
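/*
 * Example: sketch of the life cycle of a core-managed resource; the
 * release callback and the state struct are hypothetical:
 *
 *	static void my_release(struct spi_controller *ctlr,
 *			       struct spi_message *msg, void *res)
 *	{
 *		... undo whatever the resource represents ...
 *	}
 *
 *	struct my_state *st = spi_res_alloc(spi, my_release,
 *					    sizeof(*st), GFP_KERNEL);
 *	if (!st)
 *		return -ENOMEM;
 *	spi_res_add(msg, st);
 *
 * Once the message completes, spi_res_release() invokes my_release()
 * and frees the allocation; spi_res_free() is only for a resource that
 * was never added to a message.
 */
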
1024/*-------------------------------------------------------------------------*/

1025static inline bool spi_is_last_cs(struct spi_device *spi)
1026{
1027 u8 idx;
1028 bool last = false;
1029
1030 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
1031 if (spi->cs_index_mask & BIT(idx)) {
1032 if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
1033 last = true;
1034 }
1035 }
1036 return last;
1037}
1038
1040static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
1041{
1042 bool activate = enable;
1043 u8 idx;
1044
1045 /*
1046 * Avoid calling into the driver (or doing delays) if the chip select
1047 * isn't actually changing from the last time this was called.
1048 */
1049 if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1050 spi_is_last_cs(spi)) ||
1051 (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1052 !spi_is_last_cs(spi))) &&
1053 (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
1054 return;
1055
1056 trace_spi_set_cs(spi, activate);
1057
1058 spi->controller->last_cs_index_mask = spi->cs_index_mask;
1059 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
1060		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, idx) : SPI_INVALID_CS;
1061 spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
1062
1063 if (spi->mode & SPI_CS_HIGH)
1064 enable = !enable;
1065
1066 /*
1067 * Handle chip select delays for GPIO based CS or controllers without
1068 * programmable chip select timing.
1069 */
1070 if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
1071 spi_delay_exec(&spi->cs_hold, NULL);
1072
1073 if (spi_is_csgpiod(spi)) {
1074 if (!(spi->mode & SPI_NO_CS)) {
1075 /*
1076			 * Historically ACPI has no means of expressing the GPIO
1077			 * polarity, so the SPISerialBus() resource defines it on a
1078			 * per-chip basis. In order to avoid a chain of negations,
1079			 * the GPIO polarity is considered to be Active High. Even
1080			 * for the cases where _DSD() is involved (in the updated
1081			 * versions of ACPI) the GPIO CS polarity must be defined
1082			 * Active High to avoid ambiguity. That's why we use enable,
1083			 * which takes SPI_CS_HIGH into account.
1084 */
1085 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
1086 if ((spi->cs_index_mask & BIT(idx)) && spi_get_csgpiod(spi, idx)) {
1087 if (has_acpi_companion(&spi->dev))
1088 gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
1089 !enable);
1090 else
1091 /* Polarity handled by GPIO library */
1092 gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
1093 activate);
1094
1095 if (activate)
1096 spi_delay_exec(&spi->cs_setup, NULL);
1097 else
1098 spi_delay_exec(&spi->cs_inactive, NULL);
1099 }
1100 }
1101 }
1102 /* Some SPI masters need both GPIO CS & slave_select */
1103 if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1104 spi->controller->set_cs)
1105 spi->controller->set_cs(spi, !enable);
1106 } else if (spi->controller->set_cs) {
1107 spi->controller->set_cs(spi, !enable);
1108 }
1109
1110 if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
1111 if (activate)
1112 spi_delay_exec(&spi->cs_setup, NULL);
1113 else
1114 spi_delay_exec(&spi->cs_inactive, NULL);
1115 }
1116}
1117
1118#ifdef CONFIG_HAS_DMA
1119static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
1120 struct sg_table *sgt, void *buf, size_t len,
1121 enum dma_data_direction dir, unsigned long attrs)
1122{
1123 const bool vmalloced_buf = is_vmalloc_addr(buf);
1124 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1125#ifdef CONFIG_HIGHMEM
1126 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
1127 (unsigned long)buf < (PKMAP_BASE +
1128 (LAST_PKMAP * PAGE_SIZE)));
1129#else
1130 const bool kmap_buf = false;
1131#endif
1132 int desc_len;
1133 int sgs;
1134 struct page *vm_page;
1135 struct scatterlist *sg;
1136 void *sg_buf;
1137 size_t min;
1138 int i, ret;
1139
1140 if (vmalloced_buf || kmap_buf) {
1141 desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
1142 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
1143 } else if (virt_addr_valid(buf)) {
1144 desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1145 sgs = DIV_ROUND_UP(len, desc_len);
1146 } else {
1147 return -EINVAL;
1148 }
1149
1150 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
1151 if (ret != 0)
1152 return ret;
1153
1154 sg = &sgt->sgl[0];
1155 for (i = 0; i < sgs; i++) {
1157 if (vmalloced_buf || kmap_buf) {
1158 /*
1159 * Next scatterlist entry size is the minimum between
1160 * the desc_len and the remaining buffer length that
1161 * fits in a page.
1162 */
1163 min = min_t(size_t, desc_len,
1164 min_t(size_t, len,
1165 PAGE_SIZE - offset_in_page(buf)));
1166 if (vmalloced_buf)
1167 vm_page = vmalloc_to_page(buf);
1168 else
1169 vm_page = kmap_to_page(buf);
1170 if (!vm_page) {
1171 sg_free_table(sgt);
1172 return -ENOMEM;
1173 }
1174 sg_set_page(sg, vm_page,
1175 min, offset_in_page(buf));
1176 } else {
1177 min = min_t(size_t, len, desc_len);
1178 sg_buf = buf;
1179 sg_set_buf(sg, sg_buf, min);
1180 }
1181
1182 buf += min;
1183 len -= min;
1184 sg = sg_next(sg);
1185 }
1186
1187 ret = dma_map_sgtable(dev, sgt, dir, attrs);
1188 if (ret < 0) {
1189 sg_free_table(sgt);
1190 return ret;
1191 }
1192
1193 return 0;
1194}
1195
1196int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
1197 struct sg_table *sgt, void *buf, size_t len,
1198 enum dma_data_direction dir)
1199{
1200 return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1201}
1202
1203static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
1204 struct device *dev, struct sg_table *sgt,
1205 enum dma_data_direction dir,
1206 unsigned long attrs)
1207{
1208 if (sgt->orig_nents) {
1209 dma_unmap_sgtable(dev, sgt, dir, attrs);
1210 sg_free_table(sgt);
1211 sgt->orig_nents = 0;
1212 sgt->nents = 0;
1213 }
1214}
1215
1216void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1217 struct sg_table *sgt, enum dma_data_direction dir)
1218{
1219 spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1220}
1221
1222static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1223{
1224 struct device *tx_dev, *rx_dev;
1225 struct spi_transfer *xfer;
1226 int ret;
1227
1228 if (!ctlr->can_dma)
1229 return 0;
1230
1231 if (ctlr->dma_tx)
1232 tx_dev = ctlr->dma_tx->device->dev;
1233 else if (ctlr->dma_map_dev)
1234 tx_dev = ctlr->dma_map_dev;
1235 else
1236 tx_dev = ctlr->dev.parent;
1237
1238 if (ctlr->dma_rx)
1239 rx_dev = ctlr->dma_rx->device->dev;
1240 else if (ctlr->dma_map_dev)
1241 rx_dev = ctlr->dma_map_dev;
1242 else
1243 rx_dev = ctlr->dev.parent;
1244
1245 ret = -ENOMSG;
1246 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1247 /* The sync is done before each transfer. */
1248 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1249
1250 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1251 continue;
1252
1253 if (xfer->tx_buf != NULL) {
1254 ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1255 (void *)xfer->tx_buf,
1256 xfer->len, DMA_TO_DEVICE,
1257 attrs);
1258 if (ret != 0)
1259 return ret;
1260 }
1261
1262 if (xfer->rx_buf != NULL) {
1263 ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1264 xfer->rx_buf, xfer->len,
1265 DMA_FROM_DEVICE, attrs);
1266 if (ret != 0) {
1267 spi_unmap_buf_attrs(ctlr, tx_dev,
1268 &xfer->tx_sg, DMA_TO_DEVICE,
1269 attrs);
1270
1271 return ret;
1272 }
1273 }
1274 }
1275 /* No transfer has been mapped, bail out with success */
1276 if (ret)
1277 return 0;
1278
1279 ctlr->cur_rx_dma_dev = rx_dev;
1280 ctlr->cur_tx_dma_dev = tx_dev;
1281 ctlr->cur_msg_mapped = true;
1282
1283 return 0;
1284}
1285
1286static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1287{
1288 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1289 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1290 struct spi_transfer *xfer;
1291
1292 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
1293 return 0;
1294
1295 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1296 /* The sync has already been done after each transfer. */
1297 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1298
1299 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1300 continue;
1301
1302 spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1303 DMA_FROM_DEVICE, attrs);
1304 spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1305 DMA_TO_DEVICE, attrs);
1306 }
1307
1308 ctlr->cur_msg_mapped = false;
1309
1310 return 0;
1311}
1312
1313static void spi_dma_sync_for_device(struct spi_controller *ctlr,
1314 struct spi_transfer *xfer)
1315{
1316 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1317 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1318
1319 if (!ctlr->cur_msg_mapped)
1320 return;
1321
1322 if (xfer->tx_sg.orig_nents)
1323 dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1324 if (xfer->rx_sg.orig_nents)
1325 dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1326}
1327
1328static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
1329 struct spi_transfer *xfer)
1330{
1331 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1332 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1333
1334 if (!ctlr->cur_msg_mapped)
1335 return;
1336
1337 if (xfer->rx_sg.orig_nents)
1338 dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1339 if (xfer->tx_sg.orig_nents)
1340 dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1341}
1342#else /* !CONFIG_HAS_DMA */
1343static inline int __spi_map_msg(struct spi_controller *ctlr,
1344 struct spi_message *msg)
1345{
1346 return 0;
1347}
1348
1349static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1350 struct spi_message *msg)
1351{
1352 return 0;
1353}
1354
1355static void spi_dma_sync_for_device(struct spi_controller *ctrl,
1356 struct spi_transfer *xfer)
1357{
1358}
1359
1360static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
1361 struct spi_transfer *xfer)
1362{
1363}
1364#endif /* !CONFIG_HAS_DMA */
1365
1366static inline int spi_unmap_msg(struct spi_controller *ctlr,
1367 struct spi_message *msg)
1368{
1369 struct spi_transfer *xfer;
1370
1371 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1372 /*
1373		 * Restore tx_buf and rx_buf to NULL if they were replaced
1374		 * with the controller's dummy buffers by spi_map_msg().
1375 */
1376 if (xfer->tx_buf == ctlr->dummy_tx)
1377 xfer->tx_buf = NULL;
1378 if (xfer->rx_buf == ctlr->dummy_rx)
1379 xfer->rx_buf = NULL;
1380 }
1381
1382 return __spi_unmap_msg(ctlr, msg);
1383}
1384
1385static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1386{
1387 struct spi_transfer *xfer;
1388 void *tmp;
1389 unsigned int max_tx, max_rx;
1390
1391 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1392 && !(msg->spi->mode & SPI_3WIRE)) {
1393 max_tx = 0;
1394 max_rx = 0;
1395
1396 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1397 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1398 !xfer->tx_buf)
1399 max_tx = max(xfer->len, max_tx);
1400 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1401 !xfer->rx_buf)
1402 max_rx = max(xfer->len, max_rx);
1403 }
1404
1405 if (max_tx) {
1406 tmp = krealloc(ctlr->dummy_tx, max_tx,
1407 GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1408 if (!tmp)
1409 return -ENOMEM;
1410 ctlr->dummy_tx = tmp;
1411 }
1412
1413 if (max_rx) {
1414 tmp = krealloc(ctlr->dummy_rx, max_rx,
1415 GFP_KERNEL | GFP_DMA);
1416 if (!tmp)
1417 return -ENOMEM;
1418 ctlr->dummy_rx = tmp;
1419 }
1420
1421 if (max_tx || max_rx) {
1422 list_for_each_entry(xfer, &msg->transfers,
1423 transfer_list) {
1424 if (!xfer->len)
1425 continue;
1426 if (!xfer->tx_buf)
1427 xfer->tx_buf = ctlr->dummy_tx;
1428 if (!xfer->rx_buf)
1429 xfer->rx_buf = ctlr->dummy_rx;
1430 }
1431 }
1432 }
1433
1434 return __spi_map_msg(ctlr, msg);
1435}
1436
1437static int spi_transfer_wait(struct spi_controller *ctlr,
1438 struct spi_message *msg,
1439 struct spi_transfer *xfer)
1440{
1441 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1442 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1443 u32 speed_hz = xfer->speed_hz;
1444 unsigned long long ms;
1445
1446 if (spi_controller_is_slave(ctlr)) {
1447 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1448 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1449 return -EINTR;
1450 }
1451 } else {
1452 if (!speed_hz)
1453 speed_hz = 100000;
1454
1455 /*
1456 * For each byte we wait for 8 cycles of the SPI clock.
1457 * Since speed is defined in Hz and we want milliseconds,
1458		 * apply the multiplier before the division; otherwise
1459		 * we may get 0 for short transfers.
1460 */
1461 ms = 8LL * MSEC_PER_SEC * xfer->len;
1462 do_div(ms, speed_hz);
1463
1464 /*
1465		 * Double the result and add 200 ms of tolerance; use the
1466		 * predefined maximum in case of overflow.
1467 */
1468 ms += ms + 200;
1469 if (ms > UINT_MAX)
1470 ms = UINT_MAX;
1471
1472 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1473 msecs_to_jiffies(ms));
1474
1475 if (ms == 0) {
1476 SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1477 SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1478 dev_err(&msg->spi->dev,
1479 "SPI transfer timed out\n");
1480 return -ETIMEDOUT;
1481 }
1482
1483 if (xfer->error & SPI_TRANS_FAIL_IO)
1484 return -EIO;
1485 }
1486
1487 return 0;
1488}
1489
1490static void _spi_transfer_delay_ns(u32 ns)
1491{
1492 if (!ns)
1493 return;
1494 if (ns <= NSEC_PER_USEC) {
1495 ndelay(ns);
1496 } else {
1497 u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1498
1499 if (us <= 10)
1500 udelay(us);
1501 else
1502 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1503 }
1504}
1505
1506int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1507{
1508 u32 delay = _delay->value;
1509 u32 unit = _delay->unit;
1510 u32 hz;
1511
1512 if (!delay)
1513 return 0;
1514
1515 switch (unit) {
1516 case SPI_DELAY_UNIT_USECS:
1517 delay *= NSEC_PER_USEC;
1518 break;
1519 case SPI_DELAY_UNIT_NSECS:
1520 /* Nothing to do here */
1521 break;
1522 case SPI_DELAY_UNIT_SCK:
1523 /* Clock cycles need to be obtained from spi_transfer */
1524 if (!xfer)
1525 return -EINVAL;
1526 /*
1527		 * If the effective speed is unknown, approximate it
1528		 * by underestimating with half of the requested Hz.
1529 */
1530 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1531 if (!hz)
1532 return -EINVAL;
1533
1534 /* Convert delay to nanoseconds */
1535 delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1536 break;
1537 default:
1538 return -EINVAL;
1539 }
1540
1541 return delay;
1542}
1543EXPORT_SYMBOL_GPL(spi_delay_to_ns);
1544
1545int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1546{
1547 int delay;
1548
1549 might_sleep();
1550
1551 if (!_delay)
1552 return -EINVAL;
1553
1554 delay = spi_delay_to_ns(_delay, xfer);
1555 if (delay < 0)
1556 return delay;
1557
1558 _spi_transfer_delay_ns(delay);
1559
1560 return 0;
1561}
1562EXPORT_SYMBOL_GPL(spi_delay_exec);
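
/*
 * Example: a delay of 3 SCK cycles and how it resolves. Assuming an
 * effective speed of 1 MHz (an illustrative value), one clock cycle is
 * 1000 ns, so spi_delay_to_ns() yields 3000 and spi_delay_exec() waits
 * roughly 3 us:
 *
 *	struct spi_delay d = {
 *		.value	= 3,
 *		.unit	= SPI_DELAY_UNIT_SCK,
 *	};
 *
 *	xfer->effective_speed_hz = 1000000;
 *	ns = spi_delay_to_ns(&d, xfer);		... 3 * 1000 = 3000 ns
 *	ret = spi_delay_exec(&d, xfer);		... sleeps/spins for ~3 us
 */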
1563
1564static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1565 struct spi_transfer *xfer)
1566{
1567 u32 default_delay_ns = 10 * NSEC_PER_USEC;
1568 u32 delay = xfer->cs_change_delay.value;
1569 u32 unit = xfer->cs_change_delay.unit;
1570 int ret;
1571
1572 /* Return early on "fast" mode - for everything but USECS */
1573 if (!delay) {
1574 if (unit == SPI_DELAY_UNIT_USECS)
1575 _spi_transfer_delay_ns(default_delay_ns);
1576 return;
1577 }
1578
1579 ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1580 if (ret) {
1581 dev_err_once(&msg->spi->dev,
1582 "Use of unsupported delay unit %i, using default of %luus\n",
1583 unit, default_delay_ns / NSEC_PER_USEC);
1584 _spi_transfer_delay_ns(default_delay_ns);
1585 }
1586}
1587
1588void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
1589 struct spi_transfer *xfer)
1590{
1591 _spi_transfer_cs_change_delay(msg, xfer);
1592}
1593EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
1594
1595/*
1596 * spi_transfer_one_message - Default implementation of transfer_one_message()
1597 *
1598 * This is a standard implementation of transfer_one_message() for
1599 * drivers which implement a transfer_one() operation. It provides
1600 * standard handling of delays and chip select management.
1601 */
1602static int spi_transfer_one_message(struct spi_controller *ctlr,
1603 struct spi_message *msg)
1604{
1605 struct spi_transfer *xfer;
1606 bool keep_cs = false;
1607 int ret = 0;
1608 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1609 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1610
1611 xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1612 spi_set_cs(msg->spi, !xfer->cs_off, false);
1613
1614 SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1615 SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1616
1617 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1618 trace_spi_transfer_start(msg, xfer);
1619
1620 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1621 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1622
1623 if (!ctlr->ptp_sts_supported) {
1624 xfer->ptp_sts_word_pre = 0;
1625 ptp_read_system_prets(xfer->ptp_sts);
1626 }
1627
1628 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1629 reinit_completion(&ctlr->xfer_completion);
1630
1631fallback_pio:
1632 spi_dma_sync_for_device(ctlr, xfer);
1633 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1634 if (ret < 0) {
1635 spi_dma_sync_for_cpu(ctlr, xfer);
1636
1637 if (ctlr->cur_msg_mapped &&
1638 (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1639 __spi_unmap_msg(ctlr, msg);
1640 ctlr->fallback = true;
1641 xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1642 goto fallback_pio;
1643 }
1644
1645 SPI_STATISTICS_INCREMENT_FIELD(statm,
1646 errors);
1647 SPI_STATISTICS_INCREMENT_FIELD(stats,
1648 errors);
1649 dev_err(&msg->spi->dev,
1650 "SPI transfer failed: %d\n", ret);
1651 goto out;
1652 }
1653
1654 if (ret > 0) {
1655 ret = spi_transfer_wait(ctlr, msg, xfer);
1656 if (ret < 0)
1657 msg->status = ret;
1658 }
1659
1660 spi_dma_sync_for_cpu(ctlr, xfer);
1661 } else {
1662 if (xfer->len)
1663 dev_err(&msg->spi->dev,
1664 "Bufferless transfer has length %u\n",
1665 xfer->len);
1666 }
1667
1668 if (!ctlr->ptp_sts_supported) {
1669 ptp_read_system_postts(xfer->ptp_sts);
1670 xfer->ptp_sts_word_post = xfer->len;
1671 }
1672
1673 trace_spi_transfer_stop(msg, xfer);
1674
1675 if (msg->status != -EINPROGRESS)
1676 goto out;
1677
1678 spi_transfer_delay_exec(xfer);
1679
1680 if (xfer->cs_change) {
1681 if (list_is_last(&xfer->transfer_list,
1682 &msg->transfers)) {
1683 keep_cs = true;
1684 } else {
1685 if (!xfer->cs_off)
1686 spi_set_cs(msg->spi, false, false);
1687 _spi_transfer_cs_change_delay(msg, xfer);
1688 if (!list_next_entry(xfer, transfer_list)->cs_off)
1689 spi_set_cs(msg->spi, true, false);
1690 }
1691 } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1692 xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1693 spi_set_cs(msg->spi, xfer->cs_off, false);
1694 }
1695
1696 msg->actual_length += xfer->len;
1697 }
1698
1699out:
1700 if (ret != 0 || !keep_cs)
1701 spi_set_cs(msg->spi, false, false);
1702
1703 if (msg->status == -EINPROGRESS)
1704 msg->status = ret;
1705
1706 if (msg->status && ctlr->handle_err)
1707 ctlr->handle_err(ctlr, msg);
1708
1709 spi_finalize_current_message(ctlr);
1710
1711 return ret;
1712}
1713
1714/**
1715 * spi_finalize_current_transfer - report completion of a transfer
1716 * @ctlr: the controller reporting completion
1717 *
1718 * Called by SPI drivers using the core transfer_one_message()
1719 * implementation to notify it that the current interrupt-driven
1720 * transfer has finished and the next one may be scheduled.
1721 */
1722void spi_finalize_current_transfer(struct spi_controller *ctlr)
1723{
1724 complete(&ctlr->xfer_completion);
1725}
1726EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
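
/*
 * Example: sketch of the usual pairing with transfer_one(). A driver
 * starts the transfer, returns a positive value to indicate "still in
 * flight", and its (hypothetical) completion IRQ handler later calls
 * spi_finalize_current_transfer():
 *
 *	static irqreturn_t my_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		... ack the hardware, drain the RX FIFO ...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */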
1727
1728static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1729{
1730 if (ctlr->auto_runtime_pm) {
1731 pm_runtime_mark_last_busy(ctlr->dev.parent);
1732 pm_runtime_put_autosuspend(ctlr->dev.parent);
1733 }
1734}
1735
1736static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1737 struct spi_message *msg, bool was_busy)
1738{
1739 struct spi_transfer *xfer;
1740 int ret;
1741
1742 if (!was_busy && ctlr->auto_runtime_pm) {
1743 ret = pm_runtime_get_sync(ctlr->dev.parent);
1744 if (ret < 0) {
1745 pm_runtime_put_noidle(ctlr->dev.parent);
1746 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1747 ret);
1748
1749 msg->status = ret;
1750 spi_finalize_current_message(ctlr);
1751
1752 return ret;
1753 }
1754 }
1755
1756 if (!was_busy)
1757 trace_spi_controller_busy(ctlr);
1758
1759 if (!was_busy && ctlr->prepare_transfer_hardware) {
1760 ret = ctlr->prepare_transfer_hardware(ctlr);
1761 if (ret) {
1762 dev_err(&ctlr->dev,
1763 "failed to prepare transfer hardware: %d\n",
1764 ret);
1765
1766 if (ctlr->auto_runtime_pm)
1767 pm_runtime_put(ctlr->dev.parent);
1768
1769 msg->status = ret;
1770 spi_finalize_current_message(ctlr);
1771
1772 return ret;
1773 }
1774 }
1775
1776 trace_spi_message_start(msg);
1777
1778 if (ctlr->prepare_message) {
1779 ret = ctlr->prepare_message(ctlr, msg);
1780 if (ret) {
1781 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1782 ret);
1783 msg->status = ret;
1784 spi_finalize_current_message(ctlr);
1785 return ret;
1786 }
1787 msg->prepared = true;
1788 }
1789
1790 ret = spi_map_msg(ctlr, msg);
1791 if (ret) {
1792 msg->status = ret;
1793 spi_finalize_current_message(ctlr);
1794 return ret;
1795 }
1796
1797 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1798 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1799 xfer->ptp_sts_word_pre = 0;
1800 ptp_read_system_prets(xfer->ptp_sts);
1801 }
1802 }
1803
1804 /*
1805	 * A driver's implementation of transfer_one_message() must arrange for
1806	 * spi_finalize_current_message() to get called. Most drivers will do
1807	 * this in the calling context, but some don't. For those cases, a
1808	 * completion is used to guarantee that this function does not return
1809	 * until spi_finalize_current_message() is done accessing
1810	 * ctlr->cur_msg.
1811	 * Use of the following two flags makes it possible to opportunistically
1812	 * skip the use of the completion, since it involves expensive spin locks.
1813 * In case of a race with the context that calls
1814 * spi_finalize_current_message() the completion will always be used,
1815 * due to strict ordering of these flags using barriers.
1816 */
1817 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1818 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1819 reinit_completion(&ctlr->cur_msg_completion);
1820 smp_wmb(); /* Make these available to spi_finalize_current_message() */
1821
1822 ret = ctlr->transfer_one_message(ctlr, msg);
1823 if (ret) {
1824 dev_err(&ctlr->dev,
1825 "failed to transfer one message from queue\n");
1826 return ret;
1827 }
1828
1829 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1830 smp_mb(); /* See spi_finalize_current_message()... */
1831 if (READ_ONCE(ctlr->cur_msg_incomplete))
1832 wait_for_completion(&ctlr->cur_msg_completion);
1833
1834 return 0;
1835}
1836
1837/**
1838 * __spi_pump_messages - function which processes SPI message queue
1839 * @ctlr: controller to process queue for
1840 * @in_kthread: true if we are in the context of the message pump thread
1841 *
1842 * This function checks if there is any SPI message in the queue that
1843 * needs processing and if so call out to the driver to initialize hardware
1844 * and transfer each message.
1845 *
1846 * Note that it is called both from the kthread itself and also from
1847 * inside spi_sync(); the queue extraction handling at the top of the
1848 * function should deal with this safely.
1849 */
1850static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1851{
1852 struct spi_message *msg;
1853 bool was_busy = false;
1854 unsigned long flags;
1855 int ret;
1856
1857 /* Take the I/O mutex */
1858 mutex_lock(&ctlr->io_mutex);
1859
1860 /* Lock queue */
1861 spin_lock_irqsave(&ctlr->queue_lock, flags);
1862
1863 /* Make sure we are not already running a message */
1864 if (ctlr->cur_msg)
1865 goto out_unlock;
1866
1867 /* Check if the queue is idle */
1868 if (list_empty(&ctlr->queue) || !ctlr->running) {
1869 if (!ctlr->busy)
1870 goto out_unlock;
1871
1872 /* Defer any non-atomic teardown to the thread */
1873 if (!in_kthread) {
1874 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1875 !ctlr->unprepare_transfer_hardware) {
1876 spi_idle_runtime_pm(ctlr);
1877 ctlr->busy = false;
1878 ctlr->queue_empty = true;
1879 trace_spi_controller_idle(ctlr);
1880 } else {
1881 kthread_queue_work(ctlr->kworker,
1882 &ctlr->pump_messages);
1883 }
1884 goto out_unlock;
1885 }
1886
1887 ctlr->busy = false;
1888 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1889
1890 kfree(ctlr->dummy_rx);
1891 ctlr->dummy_rx = NULL;
1892 kfree(ctlr->dummy_tx);
1893 ctlr->dummy_tx = NULL;
1894 if (ctlr->unprepare_transfer_hardware &&
1895 ctlr->unprepare_transfer_hardware(ctlr))
1896 dev_err(&ctlr->dev,
1897 "failed to unprepare transfer hardware\n");
1898 spi_idle_runtime_pm(ctlr);
1899 trace_spi_controller_idle(ctlr);
1900
1901 spin_lock_irqsave(&ctlr->queue_lock, flags);
1902 ctlr->queue_empty = true;
1903 goto out_unlock;
1904 }
1905
1906 /* Extract head of queue */
1907 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1908 ctlr->cur_msg = msg;
1909
1910 list_del_init(&msg->queue);
1911 if (ctlr->busy)
1912 was_busy = true;
1913 else
1914 ctlr->busy = true;
1915 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1916
1917 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1918 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1919
1920 ctlr->cur_msg = NULL;
1921 ctlr->fallback = false;
1922
1923 mutex_unlock(&ctlr->io_mutex);
1924
1925 /* Prod the scheduler in case transfer_one() was busy waiting */
1926 if (!ret)
1927 cond_resched();
1928 return;
1929
1930out_unlock:
1931 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1932 mutex_unlock(&ctlr->io_mutex);
1933}
1934
1935/**
1936 * spi_pump_messages - kthread work function which processes spi message queue
1937 * @work: pointer to kthread work struct contained in the controller struct
1938 */
1939static void spi_pump_messages(struct kthread_work *work)
1940{
1941 struct spi_controller *ctlr =
1942 container_of(work, struct spi_controller, pump_messages);
1943
1944 __spi_pump_messages(ctlr, true);
1945}
1946
1947/**
1948 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1949 * @ctlr: Pointer to the spi_controller structure of the driver
1950 * @xfer: Pointer to the transfer being timestamped
1951 * @progress: How many words (not bytes) have been transferred so far
1952 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1953 * transfer, for less jitter in time measurement. Only compatible
1954 *             with PIO drivers. If true, the caller must follow up with
1955 *             spi_take_timestamp_post() or otherwise the system will crash.
1956 * WARNING: for fully predictable results, the CPU frequency must
1957 * also be under control (governor).
1958 *
1959 * This is a helper for drivers to collect the beginning of the TX timestamp
1960 * for the requested byte from the SPI transfer. The frequency with which this
1961 * function must be called (once per word, once for the whole transfer, once
1962 * per batch of words, etc.) is arbitrary as long as the @tx buffer offset is
1963 * greater than or equal to the requested byte at the time of the call. The
1964 * timestamp is only taken once, at the first such call. It is assumed that
1965 * the driver advances its @tx buffer pointer monotonically.
1966 */
1967void spi_take_timestamp_pre(struct spi_controller *ctlr,
1968 struct spi_transfer *xfer,
1969 size_t progress, bool irqs_off)
1970{
1971 if (!xfer->ptp_sts)
1972 return;
1973
1974 if (xfer->timestamped)
1975 return;
1976
1977 if (progress > xfer->ptp_sts_word_pre)
1978 return;
1979
1980 /* Capture the resolution of the timestamp */
1981 xfer->ptp_sts_word_pre = progress;
1982
1983 if (irqs_off) {
1984 local_irq_save(ctlr->irq_flags);
1985 preempt_disable();
1986 }
1987
1988 ptp_read_system_prets(xfer->ptp_sts);
1989}
1990EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1991
1992/**
1993 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1994 * @ctlr: Pointer to the spi_controller structure of the driver
1995 * @xfer: Pointer to the transfer being timestamped
1996 * @progress: How many words (not bytes) have been transferred so far
1997 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1998 *
1999 * This is a helper for drivers to collect the end of the TX timestamp for
2000 * the requested byte from the SPI transfer. Can be called with an arbitrary
2001 * frequency: only the first call where @tx exceeds or is equal to the
2002 * requested word will be timestamped.
2003 */
2004void spi_take_timestamp_post(struct spi_controller *ctlr,
2005 struct spi_transfer *xfer,
2006 size_t progress, bool irqs_off)
2007{
2008 if (!xfer->ptp_sts)
2009 return;
2010
2011 if (xfer->timestamped)
2012 return;
2013
2014 if (progress < xfer->ptp_sts_word_post)
2015 return;
2016
2017 ptp_read_system_postts(xfer->ptp_sts);
2018
2019 if (irqs_off) {
2020 local_irq_restore(ctlr->irq_flags);
2021 preempt_enable();
2022 }
2023
2024 /* Capture the resolution of the timestamp */
2025 xfer->ptp_sts_word_post = progress;
2026
2027 xfer->timestamped = 1;
2028}
2029EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
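
/*
 * Example: sketch of a PIO TX loop instrumented with the two helpers.
 * "priv" and the FIFO write are hypothetical driver details; progress
 * is counted in words, matching what the helpers expect:
 *
 *	for (i = 0; i < xfer->len / priv->bytes_per_word; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		my_write_word_to_fifo(priv, xfer, i);
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 */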
2030
2031/**
2032 * spi_set_thread_rt - set the controller to pump at realtime priority
2033 * @ctlr: controller to boost priority of
2034 *
2035 * This can be called because the controller requested realtime priority
2036 * (by setting the ->rt value before calling spi_register_controller()) or
2037 * because a device on the bus said that its transfers needed realtime
2038 * priority.
2039 *
2040 * NOTE: at the moment if any device on a bus says it needs realtime then
2041 * the thread will be at realtime priority for all transfers on that
2042 * controller. If this eventually becomes a problem we may see if we can
2043 * find a way to boost the priority only temporarily during relevant
2044 * transfers.
2045 */
2046static void spi_set_thread_rt(struct spi_controller *ctlr)
2047{
2048 dev_info(&ctlr->dev,
2049 "will run message pump with realtime priority\n");
2050 sched_set_fifo(ctlr->kworker->task);
2051}
2052
2053static int spi_init_queue(struct spi_controller *ctlr)
2054{
2055 ctlr->running = false;
2056 ctlr->busy = false;
2057 ctlr->queue_empty = true;
2058
2059 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
2060 if (IS_ERR(ctlr->kworker)) {
2061 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2062 return PTR_ERR(ctlr->kworker);
2063 }
2064
2065 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2066
2067 /*
2068 * Controller config will indicate if this controller should run the
2069 * message pump with high (realtime) priority to reduce the transfer
2070 * latency on the bus by minimising the delay between a transfer
2071 * request and the scheduling of the message pump thread. Without this
2072 * setting the message pump thread will remain at default priority.
2073 */
2074 if (ctlr->rt)
2075 spi_set_thread_rt(ctlr);
2076
2077 return 0;
2078}
2079
2080/**
2081 * spi_get_next_queued_message() - called by driver to check for queued
2082 * messages
2083 * @ctlr: the controller to check for queued messages
2084 *
2085 * If there are more messages in the queue, the next message is returned from
2086 * this call.
2087 *
2088 * Return: the next message in the queue, else NULL if the queue is empty.
2089 */
2090struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2091{
2092 struct spi_message *next;
2093 unsigned long flags;
2094
2095 /* Get a pointer to the next message, if any */
2096 spin_lock_irqsave(&ctlr->queue_lock, flags);
2097 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2098 queue);
2099 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2100
2101 return next;
2102}
2103EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
2104
2105/*
2106 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2107 * and spi_maybe_unoptimize_message()
2108 * @msg: the message to unoptimize
2109 *
2110 * Peripheral drivers should use spi_unoptimize_message() and callers inside
2111 * core should use spi_maybe_unoptimize_message() rather than calling this
2112 * function directly.
2113 *
2114 * It is not valid to call this on a message that is not currently optimized.
2115 */
2116static void __spi_unoptimize_message(struct spi_message *msg)
2117{
2118 struct spi_controller *ctlr = msg->spi->controller;
2119
2120 if (ctlr->unoptimize_message)
2121 ctlr->unoptimize_message(msg);
2122
2123 spi_res_release(ctlr, msg);
2124
2125 msg->optimized = false;
2126 msg->opt_state = NULL;
2127}
2128
2129/*
2130 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2131 * @msg: the message to unoptimize
2132 *
2133 * This function is used to unoptimize a message if and only if it was
2134 * optimized by the core (via spi_maybe_optimize_message()).
2135 */
2136static void spi_maybe_unoptimize_message(struct spi_message *msg)
2137{
2138 if (!msg->pre_optimized && msg->optimized)
2139 __spi_unoptimize_message(msg);
2140}
2141
2142/**
2143 * spi_finalize_current_message() - the current message is complete
2144 * @ctlr: the controller to return the message to
2145 *
2146 * Called by the driver to notify the core that the message in the front of the
2147 * queue is complete and can be removed from the queue.
2148 */
2149void spi_finalize_current_message(struct spi_controller *ctlr)
2150{
2151 struct spi_transfer *xfer;
2152 struct spi_message *mesg;
2153 int ret;
2154
2155 mesg = ctlr->cur_msg;
2156
2157 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2158 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2159 ptp_read_system_postts(xfer->ptp_sts);
2160 xfer->ptp_sts_word_post = xfer->len;
2161 }
2162 }
2163
2164 if (unlikely(ctlr->ptp_sts_supported))
2165 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2166 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2167
2168 spi_unmap_msg(ctlr, mesg);
2169
2170 if (mesg->prepared && ctlr->unprepare_message) {
2171 ret = ctlr->unprepare_message(ctlr, mesg);
2172 if (ret) {
2173 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2174 ret);
2175 }
2176 }
2177
2178 mesg->prepared = false;
2179
2180 spi_maybe_unoptimize_message(mesg);
2181
2182 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2183 smp_mb(); /* See __spi_pump_transfer_message()... */
2184 if (READ_ONCE(ctlr->cur_msg_need_completion))
2185 complete(&ctlr->cur_msg_completion);
2186
2187 trace_spi_message_done(mesg);
2188
2189 mesg->state = NULL;
2190 if (mesg->complete)
2191 mesg->complete(mesg->context);
2192}
2193EXPORT_SYMBOL_GPL(spi_finalize_current_message);
2194
2195static int spi_start_queue(struct spi_controller *ctlr)
2196{
2197 unsigned long flags;
2198
2199 spin_lock_irqsave(&ctlr->queue_lock, flags);
2200
2201 if (ctlr->running || ctlr->busy) {
2202 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2203 return -EBUSY;
2204 }
2205
2206 ctlr->running = true;
2207 ctlr->cur_msg = NULL;
2208 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2209
2210 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2211
2212 return 0;
2213}
2214
2215static int spi_stop_queue(struct spi_controller *ctlr)
2216{
2217 unsigned long flags;
2218 unsigned limit = 500;
2219 int ret = 0;
2220
2221 spin_lock_irqsave(&ctlr->queue_lock, flags);
2222
2223 /*
2224 * This is a bit lame, but is optimized for the common execution path.
2225 * A wait_queue on the ctlr->busy could be used, but then the common
2226 * execution path (pump_messages) would be required to call wake_up or
2227 * friends on every SPI message. Do this instead.
2228 */
2229 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2230 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2231 usleep_range(10000, 11000);
2232 spin_lock_irqsave(&ctlr->queue_lock, flags);
2233 }
2234
2235 if (!list_empty(&ctlr->queue) || ctlr->busy)
2236 ret = -EBUSY;
2237 else
2238 ctlr->running = false;
2239
2240 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2241
2242 return ret;
2243}
2244
2245static int spi_destroy_queue(struct spi_controller *ctlr)
2246{
2247 int ret;
2248
2249 ret = spi_stop_queue(ctlr);
2250
2251 /*
2252 * kthread_flush_worker will block until all work is done.
2253 * If the reason that stop_queue timed out is that the work will never
2254	 * finish, then it does no good to flush or stop the thread, so
2255	 * just return the error anyway.
2256 */
2257 if (ret) {
2258 dev_err(&ctlr->dev, "problem destroying queue\n");
2259 return ret;
2260 }
2261
2262 kthread_destroy_worker(ctlr->kworker);
2263
2264 return 0;
2265}
2266
2267static int __spi_queued_transfer(struct spi_device *spi,
2268 struct spi_message *msg,
2269 bool need_pump)
2270{
2271 struct spi_controller *ctlr = spi->controller;
2272 unsigned long flags;
2273
2274 spin_lock_irqsave(&ctlr->queue_lock, flags);
2275
2276 if (!ctlr->running) {
2277 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2278 return -ESHUTDOWN;
2279 }
2280 msg->actual_length = 0;
2281 msg->status = -EINPROGRESS;
2282
2283 list_add_tail(&msg->queue, &ctlr->queue);
2284 ctlr->queue_empty = false;
2285 if (!ctlr->busy && need_pump)
2286 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2287
2288 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2289 return 0;
2290}
2291
2292/**
2293 * spi_queued_transfer - transfer function for queued transfers
2294 * @spi: SPI device which is requesting transfer
2295 * @msg: SPI message to be handled; it is queued onto the driver queue
2296 *
2297 * Return: zero on success, else a negative error code.
2298 */
2299static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2300{
2301 return __spi_queued_transfer(spi, msg, true);
2302}
2303
2304static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2305{
2306 int ret;
2307
2308 ctlr->transfer = spi_queued_transfer;
2309 if (!ctlr->transfer_one_message)
2310 ctlr->transfer_one_message = spi_transfer_one_message;
2311
2312 /* Initialize and start queue */
2313 ret = spi_init_queue(ctlr);
2314 if (ret) {
2315 dev_err(&ctlr->dev, "problem initializing queue\n");
2316 goto err_init_queue;
2317 }
2318 ctlr->queued = true;
2319 ret = spi_start_queue(ctlr);
2320 if (ret) {
2321 dev_err(&ctlr->dev, "problem starting queue\n");
2322 goto err_start_queue;
2323 }
2324
2325 return 0;
2326
2327err_start_queue:
2328 spi_destroy_queue(ctlr);
2329err_init_queue:
2330 return ret;
2331}
2332
2333/**
2334 * spi_flush_queue - Send all pending messages in the queue from the caller's
2335 * context
2336 * @ctlr: controller to process queue for
2337 *
2338 * This should be used when one wants to ensure all pending messages have been
2339 * sent before doing something. It is used by the spi-mem code to make sure SPI
2340 * memory operations do not preempt regular SPI transfers that have been queued
2341 * before the spi-mem operation.
2342 */
2343void spi_flush_queue(struct spi_controller *ctlr)
2344{
2345 if (ctlr->transfer == spi_queued_transfer)
2346 __spi_pump_messages(ctlr, false);
2347}
2348
2349/*-------------------------------------------------------------------------*/
2350
2351#if defined(CONFIG_OF)
2352static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2353 struct spi_delay *delay, const char *prop)
2354{
2355 u32 value;
2356
2357 if (!of_property_read_u32(nc, prop, &value)) {
2358 if (value > U16_MAX) {
2359 delay->value = DIV_ROUND_UP(value, 1000);
2360 delay->unit = SPI_DELAY_UNIT_USECS;
2361 } else {
2362 delay->value = value;
2363 delay->unit = SPI_DELAY_UNIT_NSECS;
2364 }
2365 }
2366}
2367
2368static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2369 struct device_node *nc)
2370{
2371 u32 value, cs[SPI_CS_CNT_MAX];
2372 int rc, idx;
2373
2374 /* Mode (clock phase/polarity/etc.) */
2375 if (of_property_read_bool(nc, "spi-cpha"))
2376 spi->mode |= SPI_CPHA;
2377 if (of_property_read_bool(nc, "spi-cpol"))
2378 spi->mode |= SPI_CPOL;
2379 if (of_property_read_bool(nc, "spi-3wire"))
2380 spi->mode |= SPI_3WIRE;
2381 if (of_property_read_bool(nc, "spi-lsb-first"))
2382 spi->mode |= SPI_LSB_FIRST;
2383 if (of_property_read_bool(nc, "spi-cs-high"))
2384 spi->mode |= SPI_CS_HIGH;
2385
2386 /* Device DUAL/QUAD mode */
2387 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2388 switch (value) {
2389 case 0:
2390 spi->mode |= SPI_NO_TX;
2391 break;
2392 case 1:
2393 break;
2394 case 2:
2395 spi->mode |= SPI_TX_DUAL;
2396 break;
2397 case 4:
2398 spi->mode |= SPI_TX_QUAD;
2399 break;
2400 case 8:
2401 spi->mode |= SPI_TX_OCTAL;
2402 break;
2403 default:
2404 dev_warn(&ctlr->dev,
2405 "spi-tx-bus-width %d not supported\n",
2406 value);
2407 break;
2408 }
2409 }
2410
2411 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2412 switch (value) {
2413 case 0:
2414 spi->mode |= SPI_NO_RX;
2415 break;
2416 case 1:
2417 break;
2418 case 2:
2419 spi->mode |= SPI_RX_DUAL;
2420 break;
2421 case 4:
2422 spi->mode |= SPI_RX_QUAD;
2423 break;
2424 case 8:
2425 spi->mode |= SPI_RX_OCTAL;
2426 break;
2427 default:
2428 dev_warn(&ctlr->dev,
2429 "spi-rx-bus-width %d not supported\n",
2430 value);
2431 break;
2432 }
2433 }
2434
2435 if (spi_controller_is_slave(ctlr)) {
2436 if (!of_node_name_eq(nc, "slave")) {
2437 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2438 nc);
2439 return -EINVAL;
2440 }
2441 return 0;
2442 }
2443
2444 if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
2445		dev_err(&ctlr->dev, "number of chip selects exceeds the maximum supported\n");
2446 return -EINVAL;
2447 }
2448
2449 spi_set_all_cs_unused(spi);
2450
2451 /* Device address */
2452 rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2453 SPI_CS_CNT_MAX);
2454 if (rc < 0) {
2455 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2456 nc, rc);
2457 return rc;
2458 }
2459 if (rc > ctlr->num_chipselect) {
2460 dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2461 nc, rc);
2462		return -EINVAL;
2463 }
2464	if (of_property_read_bool(nc, "parallel-memories") &&
2465	    !(ctlr->flags & SPI_CONTROLLER_MULTI_CS)) {
2466 dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2467 return -EINVAL;
2468 }
2469 for (idx = 0; idx < rc; idx++)
2470 spi_set_chipselect(spi, idx, cs[idx]);
2471
2472 /*
2473 * By default spi->chip_select[0] will hold the physical CS number,
2474 * so set bit 0 in spi->cs_index_mask.
2475 */
2476 spi->cs_index_mask = BIT(0);
2477
2478 /* Device speed */
2479 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2480 spi->max_speed_hz = value;
2481
2482 /* Device CS delays */
2483 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2484 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2485 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2486
2487 return 0;
2488}
2489
2490static struct spi_device *
2491of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2492{
2493 struct spi_device *spi;
2494 int rc;
2495
2496 /* Alloc an spi_device */
2497 spi = spi_alloc_device(ctlr);
2498 if (!spi) {
2499 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2500 rc = -ENOMEM;
2501 goto err_out;
2502 }
2503
2504 /* Select device driver */
2505 rc = of_alias_from_compatible(nc, spi->modalias,
2506 sizeof(spi->modalias));
2507 if (rc < 0) {
2508 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2509 goto err_out;
2510 }
2511
2512 rc = of_spi_parse_dt(ctlr, spi, nc);
2513 if (rc)
2514 goto err_out;
2515
2516 /* Store a pointer to the node in the device structure */
2517 of_node_get(nc);
2518
2519 device_set_node(&spi->dev, of_fwnode_handle(nc));
2520
2521 /* Register the new device */
2522 rc = spi_add_device(spi);
2523 if (rc) {
2524 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2525 goto err_of_node_put;
2526 }
2527
2528 return spi;
2529
2530err_of_node_put:
2531 of_node_put(nc);
2532err_out:
2533 spi_dev_put(spi);
2534 return ERR_PTR(rc);
2535}
2536
2537/**
2538 * of_register_spi_devices() - Register child devices onto the SPI bus
2539 * @ctlr: Pointer to spi_controller device
2540 *
2541 * Registers an spi_device for each child node of the controller node that
2542 * represents a valid SPI slave.
2543 */
2544static void of_register_spi_devices(struct spi_controller *ctlr)
2545{
2546 struct spi_device *spi;
2547 struct device_node *nc;
2548
2549 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2550 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2551 continue;
2552 spi = of_register_spi_device(ctlr, nc);
2553 if (IS_ERR(spi)) {
2554 dev_warn(&ctlr->dev,
2555 "Failed to create SPI device for %pOF\n", nc);
2556 of_node_clear_flag(nc, OF_POPULATED);
2557 }
2558 }
2559}
2560#else
2561static void of_register_spi_devices(struct spi_controller *ctlr) { }
2562#endif
2563
2564/**
2565 * spi_new_ancillary_device() - Register ancillary SPI device
2566 * @spi: Pointer to the main SPI device registering the ancillary device
2567 * @chip_select: Chip Select of the ancillary device
2568 *
2569 * Register an ancillary SPI device; for example some chips have a chip-select
2570 * for normal device usage and another one for setup/firmware upload.
2571 *
2572 * This may only be called from the main SPI device's probe routine.
2573 *
2574 * Return: pointer to the new spi_device, or ERR_PTR() on error
2575 */
2576struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2577 u8 chip_select)
2578{
2579 struct spi_controller *ctlr = spi->controller;
2580 struct spi_device *ancillary;
2581 int rc = 0;
2582
2583 /* Alloc an spi_device */
2584 ancillary = spi_alloc_device(ctlr);
2585 if (!ancillary) {
2586 rc = -ENOMEM;
2587 goto err_out;
2588 }
2589
2590 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2591
2592 /* Use provided chip-select for ancillary device */
2593 spi_set_all_cs_unused(ancillary);
2594 spi_set_chipselect(ancillary, 0, chip_select);
2595
2596 /* Take over SPI mode/speed from SPI main device */
2597 ancillary->max_speed_hz = spi->max_speed_hz;
2598 ancillary->mode = spi->mode;
2599 /*
2600 * By default spi->chip_select[0] will hold the physical CS number,
2601 * so set bit 0 in spi->cs_index_mask.
2602 */
2603 ancillary->cs_index_mask = BIT(0);
2604
2605 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2606
2607 /* Register the new device */
2608 rc = __spi_add_device(ancillary);
2609 if (rc) {
2610 dev_err(&spi->dev, "failed to register ancillary device\n");
2611 goto err_out;
2612 }
2613
2614 return ancillary;
2615
2616err_out:
2617 spi_dev_put(ancillary);
2618 return ERR_PTR(rc);
2619}
2620EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
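
/*
 * Usage sketch (illustrative only, not part of this file): a peripheral
 * driver whose chip has a second chip select for firmware upload could
 * register the ancillary device from its probe() routine roughly like
 * this; foo_probe() and the chip-select value 1 are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *aux;
 *
 *		aux = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(aux))
 *			return PTR_ERR(aux);
 *
 *		spi_set_drvdata(spi, aux);
 *		return 0;
 *	}
 */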
2621
2622#ifdef CONFIG_ACPI
2623struct acpi_spi_lookup {
2624 struct spi_controller *ctlr;
2625 u32 max_speed_hz;
2626 u32 mode;
2627 int irq;
2628 u8 bits_per_word;
2629 u8 chip_select;
2630 int n;
2631 int index;
2632};
2633
2634static int acpi_spi_count(struct acpi_resource *ares, void *data)
2635{
2636 struct acpi_resource_spi_serialbus *sb;
2637 int *count = data;
2638
2639 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2640 return 1;
2641
2642 sb = &ares->data.spi_serial_bus;
2643 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2644 return 1;
2645
2646 *count = *count + 1;
2647
2648 return 1;
2649}
2650
2651/**
2652 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2653 * @adev: ACPI device
2654 *
2655 * Return: the number of SpiSerialBus resources in the ACPI device's
2656 * resource list, or a negative error code.
2657 */
2658int acpi_spi_count_resources(struct acpi_device *adev)
2659{
2660 LIST_HEAD(r);
2661 int count = 0;
2662 int ret;
2663
2664 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2665 if (ret < 0)
2666 return ret;
2667
2668 acpi_dev_free_resource_list(&r);
2669
2670 return count;
2671}
2672EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2673
2674static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2675 struct acpi_spi_lookup *lookup)
2676{
2677 const union acpi_object *obj;
2678
2679 if (!x86_apple_machine)
2680 return;
2681
2682 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2683 && obj->buffer.length >= 4)
2684 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2685
2686 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2687 && obj->buffer.length == 8)
2688 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2689
2690 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2691 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2692 lookup->mode |= SPI_LSB_FIRST;
2693
2694 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2695 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2696 lookup->mode |= SPI_CPOL;
2697
2698 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2699 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2700 lookup->mode |= SPI_CPHA;
2701}
2702
2703static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2704{
2705 struct acpi_spi_lookup *lookup = data;
2706 struct spi_controller *ctlr = lookup->ctlr;
2707
2708 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2709 struct acpi_resource_spi_serialbus *sb;
2710 acpi_handle parent_handle;
2711 acpi_status status;
2712
2713 sb = &ares->data.spi_serial_bus;
2714 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2715
2716 if (lookup->index != -1 && lookup->n++ != lookup->index)
2717 return 1;
2718
2719 status = acpi_get_handle(NULL,
2720 sb->resource_source.string_ptr,
2721 &parent_handle);
2722
2723 if (ACPI_FAILURE(status))
2724 return -ENODEV;
2725
2726 if (ctlr) {
2727 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2728 return -ENODEV;
2729 } else {
2730 struct acpi_device *adev;
2731
2732 adev = acpi_fetch_acpi_dev(parent_handle);
2733 if (!adev)
2734 return -ENODEV;
2735
2736 ctlr = acpi_spi_find_controller_by_adev(adev);
2737 if (!ctlr)
2738 return -EPROBE_DEFER;
2739
2740 lookup->ctlr = ctlr;
2741 }
2742
2743 /*
2744 * ACPI DeviceSelection numbering is handled by the
2745 * host controller driver in Windows and can vary
2746 * from driver to driver. In Linux we always expect
2747 * 0 .. max - 1 so we need to ask the driver to
2748 * translate between the two schemes.
2749 */
2750 if (ctlr->fw_translate_cs) {
2751 int cs = ctlr->fw_translate_cs(ctlr,
2752 sb->device_selection);
2753 if (cs < 0)
2754 return cs;
2755 lookup->chip_select = cs;
2756 } else {
2757 lookup->chip_select = sb->device_selection;
2758 }
2759
2760 lookup->max_speed_hz = sb->connection_speed;
2761 lookup->bits_per_word = sb->data_bit_length;
2762
2763 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2764 lookup->mode |= SPI_CPHA;
2765 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2766 lookup->mode |= SPI_CPOL;
2767 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2768 lookup->mode |= SPI_CS_HIGH;
2769 }
2770 } else if (lookup->irq < 0) {
2771 struct resource r;
2772
2773 if (acpi_dev_resource_interrupt(ares, 0, &r))
2774 lookup->irq = r.start;
2775 }
2776
2777 /* Always tell the ACPI core to skip this resource */
2778 return 1;
2779}
2780
2781/**
2782 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2783 * @ctlr: controller to which the spi device belongs
2784 * @adev: ACPI Device for the spi device
2785 * @index: Index of the spi resource inside the ACPI Node
2786 *
2787 * This should be used to allocate a new SPI device from an ACPI Device node.
2788 * The caller is responsible for calling spi_add_device() to register the SPI device.
2789 *
2790 * If ctlr is set to NULL, the controller for the SPI device will be looked up
2791 * using the resource.
2792 * If index is set to -1, index is not used.
2793 * Note: If index is -1, ctlr must be set.
2794 *
2795 * Return: a pointer to the new device, or ERR_PTR on error.
2796 */
2797struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2798 struct acpi_device *adev,
2799 int index)
2800{
2801 acpi_handle parent_handle = NULL;
2802 struct list_head resource_list;
2803 struct acpi_spi_lookup lookup = {};
2804 struct spi_device *spi;
2805 int ret;
2806
2807 if (!ctlr && index == -1)
2808 return ERR_PTR(-EINVAL);
2809
2810 lookup.ctlr = ctlr;
2811 lookup.irq = -1;
2812 lookup.index = index;
2813 lookup.n = 0;
2814
2815 INIT_LIST_HEAD(&resource_list);
2816 ret = acpi_dev_get_resources(adev, &resource_list,
2817 acpi_spi_add_resource, &lookup);
2818 acpi_dev_free_resource_list(&resource_list);
2819
2820 if (ret < 0)
2821 /* Found SPI in _CRS but it points to another controller */
2822 return ERR_PTR(ret);
2823
2824 if (!lookup.max_speed_hz &&
2825 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2826 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2827 /* Apple does not use _CRS but nested devices for SPI slaves */
2828 acpi_spi_parse_apple_properties(adev, &lookup);
2829 }
2830
2831 if (!lookup.max_speed_hz)
2832 return ERR_PTR(-ENODEV);
2833
2834 spi = spi_alloc_device(lookup.ctlr);
2835 if (!spi) {
2836 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2837 dev_name(&adev->dev));
2838 return ERR_PTR(-ENOMEM);
2839 }
2840
2841 spi_set_all_cs_unused(spi);
2842 spi_set_chipselect(spi, 0, lookup.chip_select);
2843
2844 ACPI_COMPANION_SET(&spi->dev, adev);
2845 spi->max_speed_hz = lookup.max_speed_hz;
2846 spi->mode |= lookup.mode;
2847 spi->irq = lookup.irq;
2848 spi->bits_per_word = lookup.bits_per_word;
2849 /*
2850 * By default spi->chip_select[0] will hold the physical CS number,
2851 * so set bit 0 in spi->cs_index_mask.
2852 */
2853 spi->cs_index_mask = BIT(0);
2854
2855 return spi;
2856}
2857EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
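
/*
 * Usage sketch (illustrative only): allocating and registering a device
 * for the first SpiSerialBus resource of an ACPI node; "adev" is assumed
 * to be a valid struct acpi_device provided by the caller.
 *
 *	struct spi_device *spi;
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 0);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 */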
2858
2859static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2860 struct acpi_device *adev)
2861{
2862 struct spi_device *spi;
2863
2864 if (acpi_bus_get_status(adev) || !adev->status.present ||
2865 acpi_device_enumerated(adev))
2866 return AE_OK;
2867
2868 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2869 if (IS_ERR(spi)) {
2870 if (PTR_ERR(spi) == -ENOMEM)
2871 return AE_NO_MEMORY;
2872 else
2873 return AE_OK;
2874 }
2875
2876 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2877 sizeof(spi->modalias));
2878
2879 if (spi->irq < 0)
2880 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2881
2882 acpi_device_set_enumerated(adev);
2883
2884 adev->power.flags.ignore_parent = true;
2885 if (spi_add_device(spi)) {
2886 adev->power.flags.ignore_parent = false;
2887 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2888 dev_name(&adev->dev));
2889 spi_dev_put(spi);
2890 }
2891
2892 return AE_OK;
2893}
2894
2895static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2896 void *data, void **return_value)
2897{
2898 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2899 struct spi_controller *ctlr = data;
2900
2901 if (!adev)
2902 return AE_OK;
2903
2904 return acpi_register_spi_device(ctlr, adev);
2905}
2906
2907#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2908
2909static void acpi_register_spi_devices(struct spi_controller *ctlr)
2910{
2911 acpi_status status;
2912 acpi_handle handle;
2913
2914 handle = ACPI_HANDLE(ctlr->dev.parent);
2915 if (!handle)
2916 return;
2917
2918 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2919 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2920 acpi_spi_add_device, NULL, ctlr, NULL);
2921 if (ACPI_FAILURE(status))
2922 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2923}
2924#else
2925static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2926#endif /* CONFIG_ACPI */
2927
2928static void spi_controller_release(struct device *dev)
2929{
2930 struct spi_controller *ctlr;
2931
2932 ctlr = container_of(dev, struct spi_controller, dev);
2933 kfree(ctlr);
2934}
2935
2936static struct class spi_master_class = {
2937 .name = "spi_master",
2938 .dev_release = spi_controller_release,
2939 .dev_groups = spi_master_groups,
2940};
2941
2942#ifdef CONFIG_SPI_SLAVE
2943/**
2944 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2945 * controller
2946 * @spi: device used for the current transfer
2947 */
2948int spi_slave_abort(struct spi_device *spi)
2949{
2950 struct spi_controller *ctlr = spi->controller;
2951
2952 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2953 return ctlr->slave_abort(ctlr);
2954
2955 return -ENOTSUPP;
2956}
2957EXPORT_SYMBOL_GPL(spi_slave_abort);
2958
2959int spi_target_abort(struct spi_device *spi)
2960{
2961 struct spi_controller *ctlr = spi->controller;
2962
2963 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2964 return ctlr->target_abort(ctlr);
2965
2966 return -ENOTSUPP;
2967}
2968EXPORT_SYMBOL_GPL(spi_target_abort);
2969
2970static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2971 char *buf)
2972{
2973 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2974 dev);
2975 struct device *child;
2976
2977 child = device_find_any_child(&ctlr->dev);
2978 return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2979}
2980
2981static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2982 const char *buf, size_t count)
2983{
2984 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2985 dev);
2986 struct spi_device *spi;
2987 struct device *child;
2988 char name[32];
2989 int rc;
2990
2991 rc = sscanf(buf, "%31s", name);
2992 if (rc != 1 || !name[0])
2993 return -EINVAL;
2994
2995 child = device_find_any_child(&ctlr->dev);
2996 if (child) {
2997 /* Remove registered slave */
2998 device_unregister(child);
2999 put_device(child);
3000 }
3001
3002 if (strcmp(name, "(null)")) {
3003 /* Register new slave */
3004 spi = spi_alloc_device(ctlr);
3005 if (!spi)
3006 return -ENOMEM;
3007
3008 strscpy(spi->modalias, name, sizeof(spi->modalias));
3009
3010 rc = spi_add_device(spi);
3011 if (rc) {
3012 spi_dev_put(spi);
3013 return rc;
3014 }
3015 }
3016
3017 return count;
3018}
3019
3020static DEVICE_ATTR_RW(slave);
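
/*
 * From userspace this attribute registers or removes the slave protocol
 * driver at runtime, e.g. for a controller that registered as spi0:
 *
 *	echo spidev > /sys/class/spi_slave/spi0/slave
 *	echo "(null)" > /sys/class/spi_slave/spi0/slave
 */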
3021
3022static struct attribute *spi_slave_attrs[] = {
3023 &dev_attr_slave.attr,
3024 NULL,
3025};
3026
3027static const struct attribute_group spi_slave_group = {
3028 .attrs = spi_slave_attrs,
3029};
3030
3031static const struct attribute_group *spi_slave_groups[] = {
3032 &spi_controller_statistics_group,
3033 &spi_slave_group,
3034 NULL,
3035};
3036
3037static struct class spi_slave_class = {
3038 .name = "spi_slave",
3039 .dev_release = spi_controller_release,
3040 .dev_groups = spi_slave_groups,
3041};
3042#else
3043extern struct class spi_slave_class; /* dummy */
3044#endif
3045
3046/**
3047 * __spi_alloc_controller - allocate an SPI master or slave controller
3048 * @dev: the controller, possibly using the platform_bus
3049 * @size: how much zeroed driver-private data to allocate; the pointer to this
3050 * memory is in the driver_data field of the returned device, accessible
3051 * with spi_controller_get_devdata(); the memory is cacheline aligned;
3052 * drivers granting DMA access to portions of their private data need to
3053 * round up @size using ALIGN(size, dma_get_cache_alignment()).
3054 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
3055 * slave (true) controller
3056 * Context: can sleep
3057 *
3058 * This call is used only by SPI controller drivers, which are the
3059 * only ones directly touching chip registers. It's how they allocate
3060 * an spi_controller structure, prior to calling spi_register_controller().
3061 *
3062 * This must be called from context that can sleep.
3063 *
3064 * The caller is responsible for assigning the bus number and initializing the
3065 * controller's methods before calling spi_register_controller(); and (after
3066 * errors adding the device) calling spi_controller_put() to prevent a memory
3067 * leak.
3068 *
3069 * Return: the SPI controller structure on success, else NULL.
3070 */
3071struct spi_controller *__spi_alloc_controller(struct device *dev,
3072 unsigned int size, bool slave)
3073{
3074 struct spi_controller *ctlr;
3075 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3076
3077 if (!dev)
3078 return NULL;
3079
3080 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3081 if (!ctlr)
3082 return NULL;
3083
3084 device_initialize(&ctlr->dev);
3085 INIT_LIST_HEAD(&ctlr->queue);
3086 spin_lock_init(&ctlr->queue_lock);
3087 spin_lock_init(&ctlr->bus_lock_spinlock);
3088 mutex_init(&ctlr->bus_lock_mutex);
3089 mutex_init(&ctlr->io_mutex);
3090 mutex_init(&ctlr->add_lock);
3091 ctlr->bus_num = -1;
3092 ctlr->num_chipselect = 1;
3093 ctlr->slave = slave;
3094 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
3095 ctlr->dev.class = &spi_slave_class;
3096 else
3097 ctlr->dev.class = &spi_master_class;
3098 ctlr->dev.parent = dev;
3099 pm_suspend_ignore_children(&ctlr->dev, true);
3100 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3101
3102 return ctlr;
3103}
3104EXPORT_SYMBOL_GPL(__spi_alloc_controller);
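
/*
 * Controller drivers normally reach this through the spi_alloc_master()
 * or spi_alloc_slave() wrappers. A minimal sketch, with hypothetical
 * foo_* names, of allocating a master with driver-private data:
 *
 *	struct foo_priv {
 *		void __iomem *base;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		struct foo_priv *priv;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *		priv = spi_controller_get_devdata(ctlr);
 *		...
 *	}
 */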
3105
3106static void devm_spi_release_controller(struct device *dev, void *ctlr)
3107{
3108 spi_controller_put(*(struct spi_controller **)ctlr);
3109}
3110
3111/**
3112 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3113 * @dev: physical device of SPI controller
3114 * @size: how much zeroed driver-private data to allocate
3115 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
3116 * Context: can sleep
3117 *
3118 * Allocate an SPI controller and automatically release a reference on it
3119 * when @dev is unbound from its driver. Drivers are thus relieved from
3120 * having to call spi_controller_put().
3121 *
3122 * The arguments to this function are identical to __spi_alloc_controller().
3123 *
3124 * Return: the SPI controller structure on success, else NULL.
3125 */
3126struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3127 unsigned int size,
3128 bool slave)
3129{
3130 struct spi_controller **ptr, *ctlr;
3131
3132 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3133 GFP_KERNEL);
3134 if (!ptr)
3135 return NULL;
3136
3137 ctlr = __spi_alloc_controller(dev, size, slave);
3138 if (ctlr) {
3139 ctlr->devm_allocated = true;
3140 *ptr = ctlr;
3141 devres_add(dev, ptr);
3142 } else {
3143 devres_free(ptr);
3144 }
3145
3146 return ctlr;
3147}
3148EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
3149
3150/**
3151 * spi_get_gpio_descs() - grab chip select GPIOs for the master
3152 * @ctlr: The SPI master to grab GPIO descriptors for
3153 */
3154static int spi_get_gpio_descs(struct spi_controller *ctlr)
3155{
3156 int nb, i;
3157 struct gpio_desc **cs;
3158 struct device *dev = &ctlr->dev;
3159 unsigned long native_cs_mask = 0;
3160 unsigned int num_cs_gpios = 0;
3161
3162 nb = gpiod_count(dev, "cs");
3163 if (nb < 0) {
3164 /* No GPIOs at all is fine, else return the error */
3165 if (nb == -ENOENT)
3166 return 0;
3167 return nb;
3168 }
3169
3170 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3171
3172 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3173 GFP_KERNEL);
3174 if (!cs)
3175 return -ENOMEM;
3176 ctlr->cs_gpiods = cs;
3177
3178 for (i = 0; i < nb; i++) {
3179 /*
3180 * Most chipselects are active low, the inverted
3181 * semantics are handled by special quirks in gpiolib,
3182		 * so initializing them as GPIOD_OUT_LOW here means
3183		 * "unasserted"; in most cases this will drive the physical
3184 * line high.
3185 */
3186 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3187 GPIOD_OUT_LOW);
3188 if (IS_ERR(cs[i]))
3189 return PTR_ERR(cs[i]);
3190
3191 if (cs[i]) {
3192 /*
3193 * If we find a CS GPIO, name it after the device and
3194 * chip select line.
3195 */
3196 char *gpioname;
3197
3198 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3199 dev_name(dev), i);
3200 if (!gpioname)
3201 return -ENOMEM;
3202 gpiod_set_consumer_name(cs[i], gpioname);
3203 num_cs_gpios++;
3204 continue;
3205 }
3206
3207 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3208 dev_err(dev, "Invalid native chip select %d\n", i);
3209 return -EINVAL;
3210 }
3211 native_cs_mask |= BIT(i);
3212 }
3213
3214 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3215
3216 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3217 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3218 dev_err(dev, "No unused native chip select available\n");
3219 return -EINVAL;
3220 }
3221
3222 return 0;
3223}
3224
3225static int spi_controller_check_ops(struct spi_controller *ctlr)
3226{
3227 /*
3228	 * The controller may implement only the high-level SPI-memory-like
3229	 * operations if it does not support regular SPI transfers, and this is a
3230	 * valid use case.
3231	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3232	 * one of the ->transfer_xxx() methods be implemented.
3233 */
3234 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3235 if (!ctlr->transfer && !ctlr->transfer_one &&
3236 !ctlr->transfer_one_message) {
3237 return -EINVAL;
3238 }
3239 }
3240
3241 return 0;
3242}
3243
3244/* Allocate dynamic bus number using Linux idr */
3245static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3246{
3247 int id;
3248
3249 mutex_lock(&board_lock);
3250 id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3251 mutex_unlock(&board_lock);
3252 if (WARN(id < 0, "couldn't get idr"))
3253 return id == -ENOSPC ? -EBUSY : id;
3254 ctlr->bus_num = id;
3255 return 0;
3256}
3257
3258/**
3259 * spi_register_controller - register SPI master or slave controller
3260 * @ctlr: initialized master, originally from spi_alloc_master() or
3261 * spi_alloc_slave()
3262 * Context: can sleep
3263 *
3264 * SPI controllers connect to their drivers using some non-SPI bus,
3265 * such as the platform bus. The final stage of probe() in that code
3266 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3267 *
3268 * SPI controllers use board-specific (often SoC-specific) bus numbers,
3269 * and board-specific addressing for SPI devices combines those numbers
3270 * with chip select numbers. Since SPI does not directly support dynamic
3271 * device identification, boards need configuration tables telling which
3272 * chip is at which address.
3273 *
3274 * This must be called from context that can sleep. It returns zero on
3275 * success, else a negative error code (dropping the controller's refcount).
3276 * After a successful return, the caller is responsible for calling
3277 * spi_unregister_controller().
3278 *
3279 * Return: zero on success, else a negative error code.
3280 */
3281int spi_register_controller(struct spi_controller *ctlr)
3282{
3283 struct device *dev = ctlr->dev.parent;
3284 struct boardinfo *bi;
3285 int first_dynamic;
3286 int status;
3287 int idx;
3288
3289 if (!dev)
3290 return -ENODEV;
3291
3292 /*
3293 * Make sure all necessary hooks are implemented before registering
3294 * the SPI controller.
3295 */
3296 status = spi_controller_check_ops(ctlr);
3297 if (status)
3298 return status;
3299
3300 if (ctlr->bus_num < 0)
3301 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3302 if (ctlr->bus_num >= 0) {
3303		/* Devices with a fixed bus num must check in with that num */
3304 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3305 if (status)
3306 return status;
3307 }
3308 if (ctlr->bus_num < 0) {
3309 first_dynamic = of_alias_get_highest_id("spi");
3310 if (first_dynamic < 0)
3311 first_dynamic = 0;
3312 else
3313 first_dynamic++;
3314
3315 status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3316 if (status)
3317 return status;
3318 }
3319 ctlr->bus_lock_flag = 0;
3320 init_completion(&ctlr->xfer_completion);
3321 init_completion(&ctlr->cur_msg_completion);
3322 if (!ctlr->max_dma_len)
3323 ctlr->max_dma_len = INT_MAX;
3324
3325 /*
3326 * Register the device, then userspace will see it.
3327 * Registration fails if the bus ID is in use.
3328 */
3329 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3330
3331 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3332 status = spi_get_gpio_descs(ctlr);
3333 if (status)
3334 goto free_bus_id;
3335 /*
3336 * A controller using GPIO descriptors always
3337 * supports SPI_CS_HIGH if need be.
3338 */
3339 ctlr->mode_bits |= SPI_CS_HIGH;
3340 }
3341
3342 /*
3343 * Even if it's just one always-selected device, there must
3344 * be at least one chipselect.
3345 */
3346 if (!ctlr->num_chipselect) {
3347 status = -EINVAL;
3348 goto free_bus_id;
3349 }
3350
3351 /* Setting last_cs to SPI_INVALID_CS means no chip selected */
3352 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3353 ctlr->last_cs[idx] = SPI_INVALID_CS;
3354
3355 status = device_add(&ctlr->dev);
3356 if (status < 0)
3357 goto free_bus_id;
3358 dev_dbg(dev, "registered %s %s\n",
3359 spi_controller_is_slave(ctlr) ? "slave" : "master",
3360 dev_name(&ctlr->dev));
3361
3362 /*
3363 * If we're using a queued driver, start the queue. Note that we don't
3364 * need the queueing logic if the driver is only supporting high-level
3365 * memory operations.
3366 */
3367 if (ctlr->transfer) {
3368 dev_info(dev, "controller is unqueued, this is deprecated\n");
3369 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3370 status = spi_controller_initialize_queue(ctlr);
3371 if (status) {
3372 device_del(&ctlr->dev);
3373 goto free_bus_id;
3374 }
3375 }
3376 /* Add statistics */
3377 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3378 if (!ctlr->pcpu_statistics) {
3379 dev_err(dev, "Error allocating per-cpu statistics\n");
3380 status = -ENOMEM;
3381 goto destroy_queue;
3382 }
3383
3384 mutex_lock(&board_lock);
3385 list_add_tail(&ctlr->list, &spi_controller_list);
3386 list_for_each_entry(bi, &board_list, list)
3387 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3388 mutex_unlock(&board_lock);
3389
3390 /* Register devices from the device tree and ACPI */
3391 of_register_spi_devices(ctlr);
3392 acpi_register_spi_devices(ctlr);
3393 return status;
3394
3395destroy_queue:
3396 spi_destroy_queue(ctlr);
3397free_bus_id:
3398 mutex_lock(&board_lock);
3399 idr_remove(&spi_master_idr, ctlr->bus_num);
3400 mutex_unlock(&board_lock);
3401 return status;
3402}
3403EXPORT_SYMBOL_GPL(spi_register_controller);
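
/*
 * A hedged sketch of the registration tail of a typical controller
 * probe(); the foo_* hooks are hypothetical and error handling is
 * abbreviated:
 *
 *	ctlr->bus_num = -1;
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->set_cs = foo_set_cs;
 *
 *	status = spi_register_controller(ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);
 */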
3404
3405static void devm_spi_unregister(struct device *dev, void *res)
3406{
3407 spi_unregister_controller(*(struct spi_controller **)res);
3408}
3409
3410/**
3411 * devm_spi_register_controller - register managed SPI master or slave
3412 * controller
3413 * @dev: device managing SPI controller
3414 * @ctlr: initialized controller, originally from spi_alloc_master() or
3415 * spi_alloc_slave()
3416 * Context: can sleep
3417 *
3418 * Register an SPI controller as with spi_register_controller(); it will
3419 * automatically be unregistered and freed when @dev is unbound.
3420 *
3421 * Return: zero on success, else a negative error code.
3422 */
3423int devm_spi_register_controller(struct device *dev,
3424 struct spi_controller *ctlr)
3425{
3426 struct spi_controller **ptr;
3427 int ret;
3428
3429 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3430 if (!ptr)
3431 return -ENOMEM;
3432
3433 ret = spi_register_controller(ctlr);
3434 if (!ret) {
3435 *ptr = ctlr;
3436 devres_add(dev, ptr);
3437 } else {
3438 devres_free(ptr);
3439 }
3440
3441 return ret;
3442}
3443EXPORT_SYMBOL_GPL(devm_spi_register_controller);
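
/*
 * Most drivers pair this with the devm allocator so that unregistration
 * and the final put both happen automatically on unbind (sketch):
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	...
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */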
3444
3445static int __unregister(struct device *dev, void *null)
3446{
3447 spi_unregister_device(to_spi_device(dev));
3448 return 0;
3449}
3450
3451/**
3452 * spi_unregister_controller - unregister SPI master or slave controller
3453 * @ctlr: the controller being unregistered
3454 * Context: can sleep
3455 *
3456 * This call is used only by SPI controller drivers, which are the
3457 * only ones directly touching chip registers.
3458 *
3459 * This must be called from context that can sleep.
3460 *
3461 * Note that this function also drops a reference to the controller.
3462 */
3463void spi_unregister_controller(struct spi_controller *ctlr)
3464{
3465 struct spi_controller *found;
3466 int id = ctlr->bus_num;
3467
3468 /* Prevent addition of new devices, unregister existing ones */
3469 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3470 mutex_lock(&ctlr->add_lock);
3471
3472 device_for_each_child(&ctlr->dev, NULL, __unregister);
3473
3474 /* First make sure that this controller was ever added */
3475 mutex_lock(&board_lock);
3476 found = idr_find(&spi_master_idr, id);
3477 mutex_unlock(&board_lock);
3478 if (ctlr->queued) {
3479 if (spi_destroy_queue(ctlr))
3480 dev_err(&ctlr->dev, "queue remove failed\n");
3481 }
3482 mutex_lock(&board_lock);
3483 list_del(&ctlr->list);
3484 mutex_unlock(&board_lock);
3485
3486 device_del(&ctlr->dev);
3487
3488 /* Free bus id */
3489 mutex_lock(&board_lock);
3490 if (found == ctlr)
3491 idr_remove(&spi_master_idr, id);
3492 mutex_unlock(&board_lock);
3493
3494 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3495 mutex_unlock(&ctlr->add_lock);
3496
3497 /*
3498 * Release the last reference on the controller if its driver
3499 * has not yet been converted to devm_spi_alloc_master/slave().
3500 */
3501 if (!ctlr->devm_allocated)
3502 put_device(&ctlr->dev);
3503}
3504EXPORT_SYMBOL_GPL(spi_unregister_controller);
3505
3506static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3507{
3508 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3509}
3510
3511static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3512{
3513 mutex_lock(&ctlr->bus_lock_mutex);
3514 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3515 mutex_unlock(&ctlr->bus_lock_mutex);
3516}
3517
3518static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3519{
3520 mutex_lock(&ctlr->bus_lock_mutex);
3521 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3522 mutex_unlock(&ctlr->bus_lock_mutex);
3523}
3524
3525int spi_controller_suspend(struct spi_controller *ctlr)
3526{
3527 int ret = 0;
3528
3529 /* Basically no-ops for non-queued controllers */
3530 if (ctlr->queued) {
3531 ret = spi_stop_queue(ctlr);
3532 if (ret)
3533 dev_err(&ctlr->dev, "queue stop failed\n");
3534 }
3535
3536 __spi_mark_suspended(ctlr);
3537 return ret;
3538}
3539EXPORT_SYMBOL_GPL(spi_controller_suspend);
3540
3541int spi_controller_resume(struct spi_controller *ctlr)
3542{
3543 int ret = 0;
3544
3545 __spi_mark_resumed(ctlr);
3546
3547 if (ctlr->queued) {
3548 ret = spi_start_queue(ctlr);
3549 if (ret)
3550 dev_err(&ctlr->dev, "queue restart failed\n");
3551 }
3552 return ret;
3553}
3554EXPORT_SYMBOL_GPL(spi_controller_resume);
3555
3556/*-------------------------------------------------------------------------*/
3557
3558/* Core methods for spi_message alterations */
3559
3560static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3561 struct spi_message *msg,
3562 void *res)
3563{
3564 struct spi_replaced_transfers *rxfer = res;
3565 size_t i;
3566
3567 /* Call extra callback if requested */
3568 if (rxfer->release)
3569 rxfer->release(ctlr, msg, res);
3570
3571 /* Insert replaced transfers back into the message */
3572 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3573
3574 /* Remove the formerly inserted entries */
3575 for (i = 0; i < rxfer->inserted; i++)
3576 list_del(&rxfer->inserted_transfers[i].transfer_list);
3577}
3578
3579/**
3580 * spi_replace_transfers - replace transfers with several transfers
3581 * and register change with spi_message.resources
3582 * @msg: the spi_message we work upon
3583 * @xfer_first: the first spi_transfer we want to replace
3584 * @remove: number of transfers to remove
3585 * @insert: the number of transfers we want to insert instead
3586 * @release: extra release code necessary in some circumstances
3587 * @extradatasize: extra data to allocate (with alignment guarantees
3588 * of struct @spi_transfer)
3589 * @gfp: gfp flags
3590 *
3591 * Returns: pointer to @spi_replaced_transfers,
3592 * PTR_ERR(...) in case of errors.
3593 */
3594static struct spi_replaced_transfers *spi_replace_transfers(
3595 struct spi_message *msg,
3596 struct spi_transfer *xfer_first,
3597 size_t remove,
3598 size_t insert,
3599 spi_replaced_release_t release,
3600 size_t extradatasize,
3601 gfp_t gfp)
3602{
3603 struct spi_replaced_transfers *rxfer;
3604 struct spi_transfer *xfer;
3605 size_t i;
3606
3607 /* Allocate the structure using spi_res */
3608 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3609 struct_size(rxfer, inserted_transfers, insert)
3610 + extradatasize,
3611 gfp);
3612 if (!rxfer)
3613 return ERR_PTR(-ENOMEM);
3614
3615 /* The release code to invoke before running the generic release */
3616 rxfer->release = release;
3617
3618 /* Assign extradata */
3619 if (extradatasize)
3620 rxfer->extradata =
3621 &rxfer->inserted_transfers[insert];
3622
3623 /* Init the replaced_transfers list */
3624 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3625
3626 /*
3627 * Assign the list_entry after which we should reinsert
3628 * the @replaced_transfers - it may be spi_message.messages!
3629 */
3630 rxfer->replaced_after = xfer_first->transfer_list.prev;
3631
3632 /* Remove the requested number of transfers */
3633 for (i = 0; i < remove; i++) {
3634 /*
3635		 * If the entry after replaced_after is msg->transfers,
3636 * then we have been requested to remove more transfers
3637 * than are in the list.
3638 */
3639 if (rxfer->replaced_after->next == &msg->transfers) {
3640 dev_err(&msg->spi->dev,
3641 "requested to remove more spi_transfers than are available\n");
3642 /* Insert replaced transfers back into the message */
3643 list_splice(&rxfer->replaced_transfers,
3644 rxfer->replaced_after);
3645
3646 /* Free the spi_replace_transfer structure... */
3647 spi_res_free(rxfer);
3648
3649 /* ...and return with an error */
3650 return ERR_PTR(-EINVAL);
3651 }
3652
3653 /*
3654 * Remove the entry after replaced_after from list of
3655 * transfers and add it to list of replaced_transfers.
3656 */
3657 list_move_tail(rxfer->replaced_after->next,
3658 &rxfer->replaced_transfers);
3659 }
3660
3661	 * Create copies of the given xfer with identical settings,
3662	 * based on the first transfer to be removed.
3663 * based on the first transfer to get removed.
3664 */
3665 for (i = 0; i < insert; i++) {
3666 /* We need to run in reverse order */
3667 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3668
3669 /* Copy all spi_transfer data */
3670 memcpy(xfer, xfer_first, sizeof(*xfer));
3671
3672 /* Add to list */
3673 list_add(&xfer->transfer_list, rxfer->replaced_after);
3674
3675 /* Clear cs_change and delay for all but the last */
3676 if (i) {
3677 xfer->cs_change = false;
3678 xfer->delay.value = 0;
3679 }
3680 }
3681
3682 /* Set up inserted... */
3683 rxfer->inserted = insert;
3684
3685 /* ...and register it with spi_res/spi_message */
3686 spi_res_add(msg, rxfer);
3687
3688 return rxfer;
3689}
3690
3691static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3692 struct spi_message *msg,
3693 struct spi_transfer **xferp,
3694 size_t maxsize)
3695{
3696 struct spi_transfer *xfer = *xferp, *xfers;
3697 struct spi_replaced_transfers *srt;
3698 size_t offset;
3699 size_t count, i;
3700
3701 /* Calculate how many we have to replace */
3702 count = DIV_ROUND_UP(xfer->len, maxsize);
3703
3704 /* Create replacement */
3705 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
3706 if (IS_ERR(srt))
3707 return PTR_ERR(srt);
3708 xfers = srt->inserted_transfers;
3709
3710 /*
3711 * Now handle each of those newly inserted spi_transfers.
3712	 * Note that the replacement spi_transfers are all preset
3713	 * to the same values as *xferp, so tx_buf, rx_buf and len
3714	 * are all identical (as well as most others),
3715	 * so we just have to fix up len and the pointers.
3716	 *
3717	 * This also includes support for the deprecated
3718	 * spi_message.is_dma_mapped interface.
3719 */
3720
3721 /*
3722 * The first transfer just needs the length modified, so we
3723 * run it outside the loop.
3724 */
3725 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3726
3727 /* All the others need rx_buf/tx_buf also set */
3728 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3729 /* Update rx_buf, tx_buf and DMA */
3730 if (xfers[i].rx_buf)
3731 xfers[i].rx_buf += offset;
3732 if (xfers[i].rx_dma)
3733 xfers[i].rx_dma += offset;
3734 if (xfers[i].tx_buf)
3735 xfers[i].tx_buf += offset;
3736 if (xfers[i].tx_dma)
3737 xfers[i].tx_dma += offset;
3738
3739 /* Update length */
3740 xfers[i].len = min(maxsize, xfers[i].len - offset);
3741 }
3742
3743 /*
3744 * We set up xferp to the last entry we have inserted,
3745 * so that we skip those already split transfers.
3746 */
3747 *xferp = &xfers[count - 1];
3748
3749 /* Increment statistics counters */
3750 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3751 transfers_split_maxsize);
3752 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3753 transfers_split_maxsize);
3754
3755 return 0;
3756}
3757
3758/**
3759 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3760 * when an individual transfer exceeds a
3761 * certain size
3762 * @ctlr: the @spi_controller for this transfer
3763 * @msg: the @spi_message to transform
3764 * @maxsize: the maximum size an individual transfer may have before it is split
3765 *
3766 * This function allocates resources that are automatically freed during the
3767 * spi message unoptimize phase so this function should only be called from
3768 * optimize_message callbacks.
3769 *
3770 * Return: status of transformation
3771 */
3772int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3773 struct spi_message *msg,
3774 size_t maxsize)
3775{
3776 struct spi_transfer *xfer;
3777 int ret;
3778
3779 /*
3780 * Iterate over the transfer_list,
3781 * but note that xfer is advanced to the last transfer inserted
3782	 * to avoid checking sizes again unnecessarily (also, xfer may
3783	 * belong to a different list by the time the
3784	 * replacement has happened).
3785 */
3786 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3787 if (xfer->len > maxsize) {
3788 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3789 maxsize);
3790 if (ret)
3791 return ret;
3792 }
3793 }
3794
3795 return 0;
3796}
3797EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
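
/*
 * For example, a controller whose FIFO can move at most 64 bytes per
 * transfer could enforce that limit from its optimize_message()
 * callback; a sketch with a hypothetical foo_optimize_message():
 *
 *	static int foo_optimize_message(struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(msg->spi->controller,
 *						   msg, 64);
 *	}
 */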
3798
3799
3800/**
3801 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3802 * when an individual transfer exceeds a
3803 * certain number of SPI words
3804 * @ctlr: the @spi_controller for this transfer
3805 * @msg: the @spi_message to transform
3806 * @maxwords: the number of words to limit each transfer to
3807 *
3808 * This function allocates resources that are automatically freed during the
3809 * spi message unoptimize phase so this function should only be called from
3810 * optimize_message callbacks.
3811 *
3812 * Return: status of transformation
3813 */
3814int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3815 struct spi_message *msg,
3816 size_t maxwords)
3817{
3818 struct spi_transfer *xfer;
3819
3820 /*
3821 * Iterate over the transfer_list,
3822 * but note that xfer is advanced to the last transfer inserted
3823	 * to avoid checking sizes again unnecessarily (also, xfer may
3824	 * belong to a different list by the time the
3825	 * replacement has happened).
3826 */
3827 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3828 size_t maxsize;
3829 int ret;
3830
3831 maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3832 if (xfer->len > maxsize) {
3833 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3834 maxsize);
3835 if (ret)
3836 return ret;
3837 }
3838 }
3839
3840 return 0;
3841}
3842EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
3843
3844/*-------------------------------------------------------------------------*/
3845
3846/*
3847 * Core methods for SPI controller protocol drivers. Some of the
3848 * other core methods are currently defined as inline functions.
3849 */
3850
3851static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3852 u8 bits_per_word)
3853{
3854 if (ctlr->bits_per_word_mask) {
3855 /* Only 32 bits fit in the mask */
3856 if (bits_per_word > 32)
3857 return -EINVAL;
3858 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3859 return -EINVAL;
3860 }
3861
3862 return 0;
3863}
3864
3865/**
3866 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3867 * @spi: the device that requires specific CS timing configuration
3868 *
3869 * Return: zero on success, else a negative error code.
3870 */
3871static int spi_set_cs_timing(struct spi_device *spi)
3872{
3873 struct device *parent = spi->controller->dev.parent;
3874 int status = 0;
3875
3876 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3877 if (spi->controller->auto_runtime_pm) {
3878 status = pm_runtime_get_sync(parent);
3879 if (status < 0) {
3880 pm_runtime_put_noidle(parent);
3881 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3882 status);
3883 return status;
3884 }
3885
3886 status = spi->controller->set_cs_timing(spi);
3887 pm_runtime_mark_last_busy(parent);
3888 pm_runtime_put_autosuspend(parent);
3889 } else {
3890 status = spi->controller->set_cs_timing(spi);
3891 }
3892 }
3893 return status;
3894}
3895
3896/**
3897 * spi_setup - setup SPI mode and clock rate
3898 * @spi: the device whose settings are being modified
3899 * Context: can sleep, and no requests are queued to the device
3900 *
3901 * SPI protocol drivers may need to update the transfer mode if the
3902 * device doesn't work with its default. They may likewise need
3903 * to update clock rates or word sizes from initial values. This function
3904 * changes those settings, and must be called from a context that can sleep.
3905 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3906 * effect the next time the device is selected and data is transferred to
3907 * or from it. When this function returns, the SPI device is deselected.
3908 *
3909 * Note that this call will fail if the protocol driver specifies an option
3910 * that the underlying controller or its driver does not support. For
3911 * example, not all hardware supports wire transfers using nine bit words,
3912 * LSB-first wire encoding, or active-high chipselects.
3913 *
3914 * Return: zero on success, else a negative error code.
3915 */
3916int spi_setup(struct spi_device *spi)
3917{
3918 unsigned bad_bits, ugly_bits;
3919 int status = 0;
3920
3921 /*
3922	 * Check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3923	 * being set at the same time.
3924 */
3925 if ((hweight_long(spi->mode &
3926 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3927 (hweight_long(spi->mode &
3928 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3929 dev_err(&spi->dev,
3930			"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3931 return -EINVAL;
3932 }
3933 /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3934 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3935 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3936 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3937 return -EINVAL;
3938 /*
3939 * Help drivers fail *cleanly* when they need options
3940 * that aren't supported with their current controller.
3941 * SPI_CS_WORD has a fallback software implementation,
3942 * so it is ignored here.
3943 */
3944 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3945 SPI_NO_TX | SPI_NO_RX);
3946 ugly_bits = bad_bits &
3947 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3948 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3949 if (ugly_bits) {
3950 dev_warn(&spi->dev,
3951 "setup: ignoring unsupported mode bits %x\n",
3952 ugly_bits);
3953 spi->mode &= ~ugly_bits;
3954 bad_bits &= ~ugly_bits;
3955 }
3956 if (bad_bits) {
3957 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3958 bad_bits);
3959 return -EINVAL;
3960 }
3961
3962 if (!spi->bits_per_word) {
3963 spi->bits_per_word = 8;
3964 } else {
3965 /*
3966 * Some controllers may not support the default 8 bits-per-word
3967 * so only perform the check when this is explicitly provided.
3968 */
3969 status = __spi_validate_bits_per_word(spi->controller,
3970 spi->bits_per_word);
3971 if (status)
3972 return status;
3973 }
3974
3975 if (spi->controller->max_speed_hz &&
3976 (!spi->max_speed_hz ||
3977 spi->max_speed_hz > spi->controller->max_speed_hz))
3978 spi->max_speed_hz = spi->controller->max_speed_hz;
3979
3980 mutex_lock(&spi->controller->io_mutex);
3981
3982 if (spi->controller->setup) {
3983 status = spi->controller->setup(spi);
3984 if (status) {
3985 mutex_unlock(&spi->controller->io_mutex);
3986 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3987 status);
3988 return status;
3989 }
3990 }
3991
3992 status = spi_set_cs_timing(spi);
3993 if (status) {
3994 mutex_unlock(&spi->controller->io_mutex);
3995 return status;
3996 }
3997
3998 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3999 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
4000 if (status < 0) {
4001 mutex_unlock(&spi->controller->io_mutex);
4002 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
4003 status);
4004 return status;
4005 }
4006
4007 /*
4008		 * We do not want to return a positive value from pm_runtime_get;
4009		 * there are many instances of drivers calling spi_setup() and
4010		 * checking for a non-zero return value instead of a negative
4011		 * return value.
4012 */
4013 status = 0;
4014
4015 spi_set_cs(spi, false, true);
4016 pm_runtime_mark_last_busy(spi->controller->dev.parent);
4017 pm_runtime_put_autosuspend(spi->controller->dev.parent);
4018 } else {
4019 spi_set_cs(spi, false, true);
4020 }
4021
4022 mutex_unlock(&spi->controller->io_mutex);
4023
4024 if (spi->rt && !spi->controller->rt) {
4025 spi->controller->rt = true;
4026 spi_set_thread_rt(spi->controller);
4027 }
4028
4029 trace_spi_setup(spi, status);
4030
4031 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4032 spi->mode & SPI_MODE_X_MASK,
4033 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4034 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4035 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
4036 (spi->mode & SPI_LOOP) ? "loopback, " : "",
4037 spi->bits_per_word, spi->max_speed_hz,
4038 status);
4039
4040 return status;
4041}
4042EXPORT_SYMBOL_GPL(spi_setup);
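
/*
 * Typical use from a protocol driver's probe(), as a sketch; the mode,
 * word size and speed below are example values only:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return dev_err_probe(&spi->dev, ret, "spi_setup failed\n");
 */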
4043
4044static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4045 struct spi_device *spi)
4046{
4047 int delay1, delay2;
4048
4049 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4050 if (delay1 < 0)
4051 return delay1;
4052
4053 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4054 if (delay2 < 0)
4055 return delay2;
4056
4057 if (delay1 < delay2)
4058 memcpy(&xfer->word_delay, &spi->word_delay,
4059 sizeof(xfer->word_delay));
4060
4061 return 0;
4062}
4063
4064static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4065{
4066 struct spi_controller *ctlr = spi->controller;
4067 struct spi_transfer *xfer;
4068 int w_size;
4069
4070 if (list_empty(&message->transfers))
4071 return -EINVAL;
4072
4073 message->spi = spi;
4074
4075 /*
4076	 * Half-duplex links include the original MicroWire, and ones with
4077 * only one data pin like SPI_3WIRE (switches direction) or where
4078 * either MOSI or MISO is missing. They can also be caused by
4079 * software limitations.
4080 */
4081 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4082 (spi->mode & SPI_3WIRE)) {
4083 unsigned flags = ctlr->flags;
4084
4085 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4086 if (xfer->rx_buf && xfer->tx_buf)
4087 return -EINVAL;
4088 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4089 return -EINVAL;
4090 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4091 return -EINVAL;
4092 }
4093 }
4094
4095 /*
4096 * Set transfer bits_per_word and max speed as spi device default if
4097 * it is not set for this transfer.
4098 * Set transfer tx_nbits and rx_nbits as single transfer default
4099 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
4100 * Ensure transfer word_delay is at least as long as that required by
4101 * device itself.
4102 */
4103 message->frame_length = 0;
4104 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4105 xfer->effective_speed_hz = 0;
4106 message->frame_length += xfer->len;
4107 if (!xfer->bits_per_word)
4108 xfer->bits_per_word = spi->bits_per_word;
4109
4110 if (!xfer->speed_hz)
4111 xfer->speed_hz = spi->max_speed_hz;
4112
4113 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4114 xfer->speed_hz = ctlr->max_speed_hz;
4115
4116 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4117 return -EINVAL;
4118
4119 /*
4120		 * The SPI transfer length should be a multiple of the SPI word size,
4121		 * where the SPI word size is rounded up to a power-of-two number of bytes.
4122 */
4123 if (xfer->bits_per_word <= 8)
4124 w_size = 1;
4125 else if (xfer->bits_per_word <= 16)
4126 w_size = 2;
4127 else
4128 w_size = 4;
4129
4130 /* No partial transfers accepted */
4131 if (xfer->len % w_size)
4132 return -EINVAL;
4133
4134 if (xfer->speed_hz && ctlr->min_speed_hz &&
4135 xfer->speed_hz < ctlr->min_speed_hz)
4136 return -EINVAL;
4137
4138 if (xfer->tx_buf && !xfer->tx_nbits)
4139 xfer->tx_nbits = SPI_NBITS_SINGLE;
4140 if (xfer->rx_buf && !xfer->rx_nbits)
4141 xfer->rx_nbits = SPI_NBITS_SINGLE;
4142 /*
4143 * Check transfer tx/rx_nbits:
4144 * 1. check the value matches one of single, dual and quad
4145 * 2. check tx/rx_nbits match the mode in spi_device
4146 */
4147 if (xfer->tx_buf) {
4148 if (spi->mode & SPI_NO_TX)
4149 return -EINVAL;
4150 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4151 xfer->tx_nbits != SPI_NBITS_DUAL &&
4152 xfer->tx_nbits != SPI_NBITS_QUAD)
4153 return -EINVAL;
4154 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4155 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4156 return -EINVAL;
4157 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4158 !(spi->mode & SPI_TX_QUAD))
4159 return -EINVAL;
4160 }
4161 /* Check transfer rx_nbits */
4162 if (xfer->rx_buf) {
4163 if (spi->mode & SPI_NO_RX)
4164 return -EINVAL;
4165 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4166 xfer->rx_nbits != SPI_NBITS_DUAL &&
4167 xfer->rx_nbits != SPI_NBITS_QUAD)
4168 return -EINVAL;
4169 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4170 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4171 return -EINVAL;
4172 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4173 !(spi->mode & SPI_RX_QUAD))
4174 return -EINVAL;
4175 }
4176
4177 if (_spi_xfer_word_delay_update(xfer, spi))
4178 return -EINVAL;
4179 }
4180
4181 message->status = -EINPROGRESS;
4182
4183 return 0;
4184}
4185
4186/*
4187 * spi_split_transfers - generic handling of transfer splitting
4188 * @msg: the message to split
4189 *
4190 * Under certain conditions, a SPI controller may not support arbitrary
4191 * transfer sizes or other features required by a peripheral. This function
4192 * will split the transfers in the message into smaller transfers that are
4193 * supported by the controller.
4194 *
4195 * Controllers with special requirements not covered here can also split
4196 * transfers in the optimize_message() callback.
4197 *
4198 * Context: can sleep
4199 * Return: zero on success, else a negative error code
4200 */
4201static int spi_split_transfers(struct spi_message *msg)
4202{
4203 struct spi_controller *ctlr = msg->spi->controller;
4204 struct spi_transfer *xfer;
4205 int ret;
4206
4207 /*
4208 * If an SPI controller does not support toggling the CS line on each
4209 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4210 * for the CS line, we can emulate the CS-per-word hardware function by
4211 * splitting transfers into one-word transfers and ensuring that
4212 * cs_change is set for each transfer.
4213 */
4214 if ((msg->spi->mode & SPI_CS_WORD) &&
4215 (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4216 ret = spi_split_transfers_maxwords(ctlr, msg, 1);
4217 if (ret)
4218 return ret;
4219
4220 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4221 /* Don't change cs_change on the last entry in the list */
4222 if (list_is_last(&xfer->transfer_list, &msg->transfers))
4223 break;
4224
4225 xfer->cs_change = 1;
4226 }
4227 } else {
4228 ret = spi_split_transfers_maxsize(ctlr, msg,
4229 spi_max_transfer_size(msg->spi));
4230 if (ret)
4231 return ret;
4232 }
4233
4234 return 0;
4235}
4236
4237/*
4238 * __spi_optimize_message - shared implementation for spi_optimize_message()
4239 * and spi_maybe_optimize_message()
4240 * @spi: the device that will be used for the message
4241 * @msg: the message to optimize
4242 *
4243 * Peripheral drivers will call spi_optimize_message() and the spi core will
4244 * call spi_maybe_optimize_message() instead of calling this directly.
4245 *
4246 * It is not valid to call this on a message that has already been optimized.
4247 *
4248 * Return: zero on success, else a negative error code
4249 */
4250static int __spi_optimize_message(struct spi_device *spi,
4251 struct spi_message *msg)
4252{
4253 struct spi_controller *ctlr = spi->controller;
4254 int ret;
4255
4256 ret = __spi_validate(spi, msg);
4257 if (ret)
4258 return ret;
4259
4260 ret = spi_split_transfers(msg);
4261 if (ret)
4262 return ret;
4263
4264 if (ctlr->optimize_message) {
4265 ret = ctlr->optimize_message(msg);
4266 if (ret) {
4267 spi_res_release(ctlr, msg);
4268 return ret;
4269 }
4270 }
4271
4272 msg->optimized = true;
4273
4274 return 0;
4275}
4276
4277/*
4278 * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4279 * @spi: the device that will be used for the message
4280 * @msg: the message to optimize
4281 * Return: zero on success, else a negative error code
4282 */
4283static int spi_maybe_optimize_message(struct spi_device *spi,
4284 struct spi_message *msg)
4285{
4286 if (msg->pre_optimized)
4287 return 0;
4288
4289 return __spi_optimize_message(spi, msg);
4290}
4291
4292/**
4293 * spi_optimize_message - do any one-time validation and setup for a SPI message
4294 * @spi: the device that will be used for the message
4295 * @msg: the message to optimize
4296 *
4297 * Peripheral drivers that reuse the same message repeatedly may call this to
4298 * perform as much message prep as possible once, rather than repeating it each
4299 * time a message transfer is performed, to improve throughput and reduce CPU
4300 * usage.
4301 *
4302 * Once a message has been optimized, it cannot be modified with the exception
4303 * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4304 * only the data in the memory it points to).
4305 *
4306 * Calls to this function must be balanced with calls to spi_unoptimize_message()
4307 * to avoid leaking resources.
4308 *
4309 * Context: can sleep
4310 * Return: zero on success, else a negative error code
4311 */
4312int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4313{
4314 int ret;
4315
4316 ret = __spi_optimize_message(spi, msg);
4317 if (ret)
4318 return ret;
4319
4320 /*
4321 * This flag indicates that the peripheral driver called spi_optimize_message()
4322	 * and therefore we shouldn't unoptimize the message automatically when finalizing
4323 * the message but rather wait until spi_unoptimize_message() is called
4324 * by the peripheral driver.
4325 */
4326 msg->pre_optimized = true;
4327
4328 return 0;
4329}
4330EXPORT_SYMBOL_GPL(spi_optimize_message);
4331
4332/**
4333 * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4334 * @msg: the message to unoptimize
4335 *
4336 * Calls to this function must be balanced with calls to spi_optimize_message().
4337 *
4338 * Context: can sleep
4339 */
4340void spi_unoptimize_message(struct spi_message *msg)
4341{
4342 __spi_unoptimize_message(msg);
4343 msg->pre_optimized = false;
4344}
4345EXPORT_SYMBOL_GPL(spi_unoptimize_message);
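
/*
 * A peripheral driver that issues the same message many times might
 * balance the two calls like this (sketch; update_tx_buf() stands in
 * for refreshing the contents of xfer->tx_buf between runs):
 *
 *	ret = spi_optimize_message(spi, &msg);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < n; i++) {
 *		update_tx_buf();
 *		spi_sync(spi, &msg);
 *	}
 *
 *	spi_unoptimize_message(&msg);
 */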
4346
4347static int __spi_async(struct spi_device *spi, struct spi_message *message)
4348{
4349 struct spi_controller *ctlr = spi->controller;
4350 struct spi_transfer *xfer;
4351
4352 /*
4353 * Some controllers do not support doing regular SPI transfers. Return
4354 * ENOTSUPP when this is the case.
4355 */
4356 if (!ctlr->transfer)
4357 return -ENOTSUPP;
4358
4359 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4360 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4361
4362 trace_spi_message_submit(message);
4363
4364 if (!ctlr->ptp_sts_supported) {
4365 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4366 xfer->ptp_sts_word_pre = 0;
4367 ptp_read_system_prets(xfer->ptp_sts);
4368 }
4369 }
4370
4371 return ctlr->transfer(spi, message);
4372}
4373
4374/**
4375 * spi_async - asynchronous SPI transfer
4376 * @spi: device with which data will be exchanged
4377 * @message: describes the data transfers, including completion callback
4378 * Context: any (IRQs may be blocked, etc)
4379 *
4380 * This call may be used in IRQ handlers and other contexts which can't sleep,
4381 * as well as from task contexts which can sleep.
4382 *
4383 * The completion callback is invoked in a context which can't sleep.
4384 * Before that invocation, the value of message->status is undefined.
4385 * When the callback is issued, message->status holds either zero (to
4386 * indicate complete success) or a negative error code. After that
4387 * callback returns, the driver which issued the transfer request may
4388 * deallocate the associated memory; it's no longer in use by any SPI
4389 * core or controller driver code.
4390 *
4391 * Note that although all messages to a spi_device are handled in
4392 * FIFO order, messages may go to different devices in other orders.
4393 * Some devices might be higher priority, or have various "hard" access
4394 * time requirements, for example.
4395 *
4396 * On detection of any fault during the transfer, processing of
4397 * the entire message is aborted, and the device is deselected.
4398 * Until returning from the associated message completion callback,
4399 * no other spi_message queued to that device will be processed.
4400 * (This rule applies equally to all the synchronous transfer calls,
4401 * which are wrappers around this core asynchronous primitive.)
4402 *
4403 * Return: zero on success, else a negative error code.
4404 */
4405int spi_async(struct spi_device *spi, struct spi_message *message)
4406{
4407 struct spi_controller *ctlr = spi->controller;
4408 int ret;
4409 unsigned long flags;
4410
4411 ret = spi_maybe_optimize_message(spi, message);
4412 if (ret)
4413 return ret;
4414
4415 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4416
4417 if (ctlr->bus_lock_flag)
4418 ret = -EBUSY;
4419 else
4420 ret = __spi_async(spi, message);
4421
4422 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4423
4424 spi_maybe_unoptimize_message(message);
4425
4426 return ret;
4427}
4428EXPORT_SYMBOL_GPL(spi_async);
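
/*
 * Sketch of an asynchronous submission that signals a completion from
 * the callback; foo_complete() and the message setup are illustrative:
 *
 *	static void foo_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	...
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	msg.complete = foo_complete;
 *	msg.context = &done;
 *	ret = spi_async(spi, &msg);
 *	if (!ret)
 *		wait_for_completion(&done);
 */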

static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
	bool was_busy;
	int ret;

	mutex_lock(&ctlr->io_mutex);

	was_busy = ctlr->busy;

	ctlr->cur_msg = msg;
	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	if (ret)
		dev_err(&ctlr->dev, "noqueue transfer failed\n");
	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	if (!was_busy) {
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
	}

	mutex_unlock(&ctlr->io_mutex);
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;
	int status;
	struct spi_controller *ctlr = spi->controller;

	if (__spi_check_suspended(ctlr)) {
		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
		return -ESHUTDOWN;
	}

	status = spi_maybe_optimize_message(spi, message);
	if (status)
		return status;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);

	/*
	 * Checking queue_empty here only guarantees async/sync message
	 * ordering when coming from the same context. It does not need to
	 * guard against reentrancy from a different context. The io_mutex
	 * will catch those cases.
	 */
	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
		message->actual_length = 0;
		message->status = -EINPROGRESS;

		trace_spi_message_submit(message);

		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);

		__spi_transfer_message_noqueue(ctlr, message);

		return message->status;
	}

	/*
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send this message to the async queue as well and
	 * wait until it has completed.
	 */
	message->complete = spi_complete;
	message->context = &done;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	status = __spi_async(spi, message);
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->complete = NULL;
	message->context = NULL;

	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
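
/*
 * Usage sketch (illustrative only; the chip and its buffers are
 * hypothetical): build one full-duplex transfer and run it
 * synchronously. The spi_sync_transfer() inline helper wraps exactly
 * this pattern.
 *
 *	static int my_chip_xfer(struct spi_device *spi, const void *tx,
 *				void *rx, size_t len)
 *	{
 *		struct spi_transfer xfer = {
 *			.tx_buf = tx,
 *			.rx_buf = rx,
 *			.len = len,
 *		};
 *		struct spi_message msg;
 *
 *		spi_message_init_with_transfers(&msg, &xfer, 1);
 *		return spi_sync(spi, &msg);
 *	}
 */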

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* Mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
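
/*
 * Usage sketch (illustrative only; "msg1" and "msg2" are hypothetical
 * messages): keep other clients off the bus while issuing a sequence
 * that must not be interleaved, using spi_sync_locked() between the
 * lock/unlock calls.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */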

/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be DMA-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with DMA-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (this is a pure convenience API), but we can keep
	 * heap costs out of the hot path unless someone else is using the
	 * preallocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the I/O */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
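
/*
 * Usage sketch (illustrative only; the register layout and the 0x80
 * "read" bit are hypothetical): a typical register read sends a one-byte
 * command and reads a one-byte reply, with the caller's stack buffers
 * copied internally into the DMA-safe scratch buffer.
 *
 *	static int my_chip_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		u8 cmd = reg | 0x80;
 *
 *		return spi_write_then_read(spi, &cmd, 1, val, 1);
 *	}
 */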

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with the returned spi_device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* SPI controllers are not on spi_bus, so we must find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* Reference obtained in class_find_device() */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		/*
		 * Clear the flag before adding the device so that fw_devlink
		 * doesn't skip adding consumers to this device.
		 */
		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create SPI device for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* Not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And drop the reference taken by the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}
EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include making the boardinfo data structures much more public.
 */
postcore_initcall(spi_init);