// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

#define spi_pcpu_stats_totalize(ret, in, field) \
do { \
	int i; \
	ret = 0; \
	for_each_possible_cpu(i) { \
		const struct spi_statistics *pcpu_stats; \
		u64 inc; \
		unsigned int start; \
		pcpu_stats = per_cpu_ptr(in, i); \
		do { \
			start = u64_stats_fetch_begin( \
					&pcpu_stats->syncp); \
			inc = u64_stats_read(&pcpu_stats->field); \
		} while (u64_stats_fetch_retry( \
					&pcpu_stats->syncp, start)); \
		ret += inc; \
	} \
} while (0)

#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_controller_##field##_show(struct device *dev, \
					     struct device_attribute *attr, \
					     char *buf) \
{ \
	struct spi_controller *ctlr = container_of(dev, \
					 struct spi_controller, dev); \
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_controller_##field = { \
	.attr = { .name = file, .mode = 0444 }, \
	.show = spi_controller_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
					 struct device_attribute *attr, \
					 char *buf) \
{ \
	struct spi_device *spi = to_spi_device(dev); \
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
	.attr = { .name = file, .mode = 0444 }, \
	.show = spi_device_##field##_show, \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf) \
{ \
	ssize_t len; \
	u64 val; \
	spi_pcpu_stats_totalize(val, stat, field); \
	len = sysfs_emit(buf, "%llu\n", val); \
	return len; \
} \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field) \
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
				 "transfer_bytes_histo_" number, \
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}
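
/*
 * Worked example of the bucketing above (illustrative only): a transfer
 * of xfer->len = 100 bytes has fls(100) = 7, so l2len = 6 and the
 * transfer is counted in transfer_bytes_histo[6], i.e. the "64-127"
 * bucket exposed in sysfs.
 */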

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);
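
/*
 * Usage sketch (not part of this file; the "foo" names are hypothetical):
 * a client driver's probe() can use spi_get_device_match_data() to fetch
 * per-variant data regardless of whether the device was matched via
 * DT/ACPI or via its spi_device_id table:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct foo_chip_info *info;
 *
 *		info = spi_get_device_match_data(spi);
 *		if (!info)
 *			return -ENODEV;
 *		...
 *	}
 */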

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}
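
/*
 * The override checked first above can be set from userspace before
 * binding, e.g. (device path illustrative):
 *
 *	echo spidev > /sys/bus/spi/devices/spi0.0/driver_override
 *	echo spi0.0 > /sys/bus/spi/drivers_probe
 */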

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name = "spi",
	.dev_groups = spi_dev_groups,
	.match = spi_match_device,
	.uevent = spi_uevent,
	.probe = spi_probe,
	.remove = spi_remove,
	.shutdown = spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
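
/*
 * Registration sketch for a hypothetical client driver, showing why an
 * id_table entry ("foo") matching the stripped compatible ("vendor,foo")
 * avoids the warning above (all names here are illustrative):
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.of_match_table = foo_of_match,
 *		},
 *		.id_table = foo_spi_ids,
 *		.probe = foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */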

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into board-specific files like
 * arch/.../mach.../board-YYY.c, alongside other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head list;
	struct spi_board_info board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info and
 * spi_controller lists and their matching process; also used
 * to protect the struct idr object (spi_master_idr).
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
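
/*
 * Typical use (illustrative; values hypothetical): allocate, fill in,
 * then add, dropping the reference if the device is abandoned before
 * spi_add_device() succeeds:
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strscpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 */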

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal, and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
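
/*
 * Example (values hypothetical): an adapter driver that learned about a
 * chip out-of-band could instantiate it like this:
 *
 *	static struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 500000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_3,
 *	};
 *	struct spi_device *spi = spi_new_device(ctlr, &chip);
 */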

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
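
/*
 * Board file sketch (values hypothetical), typically run at
 * arch_initcall time as described above:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "m25p80",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *		},
 *	};
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */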

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
		       (!enable && spi->controller->last_cs != spi->chip_select)) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi->chip_select : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
		spi_delay_exec(&spi->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has no means of expressing GPIO
			 * polarity, and thus the SPISerialBus() resource
			 * defines it on a per-chip basis. In order to avoid
			 * a chain of negations, the GPIO polarity is
			 * considered to be Active High. Even for the cases
			 * when _DSD() is involved (in the updated versions
			 * of ACPI) the GPIO CS polarity must be defined
			 * Active High to avoid ambiguity. That's why we use
			 * enable, which takes SPI_CS_HIGH into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi->cs_gpiod, activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	if (sgt->orig_nents) {
		dma_unmap_sgtable(dev, sgt, dir, attrs);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);

				return ret;
			}
		}
	}

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;
	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original NULL tx_buf or rx_buf if
		 * spi_map_msg() substituted the dummy buffers for them.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * apply the respective multiplier before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Double it and add 200 ms of tolerance; use the
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
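
/*
 * Worked example: with _delay = { .value = 4, .unit = SPI_DELAY_UNIT_SCK }
 * and an effective transfer speed of 1 MHz, one clock cycle is
 * DIV_ROUND_UP(NSEC_PER_SEC, 1000000) = 1000 ns, so the function
 * returns 4000 (ns).
 */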

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
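
/*
 * Typical pairing (driver-side sketch; the "foo" names are hypothetical):
 * a driver's transfer_one() starts the hardware and returns 1 ("in
 * progress", so the core waits in spi_transfer_wait()), and its
 * completion interrupt finishes the transfer:
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_dma(ctlr, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct spi_controller *ctlr = data;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */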

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	ret = spi_split_transfers_maxsize(ctlr, msg,
					  spi_max_transfer_size(msg->spi),
					  GFP_KERNEL | GFP_DMA);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * A driver's implementation of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers
	 * will do this in the calling context, but some don't. For those
	 * cases, a completion is used to guarantee that this function does
	 * not return until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * The following two flags allow this function to opportunistically
	 * skip the use of the completion, since taking it involves expensive
	 * spin locks. In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Take the IO mutex */
	mutex_lock(&ctlr->io_mutex);

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg)
		goto out_unlock;

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy)
			goto out_unlock;

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				ctlr->queue_empty = true;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			goto out_unlock;
		}

		ctlr->busy = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->queue_empty = true;
		goto out_unlock;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
	return;

out_unlock:
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	mutex_unlock(&ctlr->io_mutex);
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}
1843
1844/**
1845 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1846 * @ctlr: Pointer to the spi_controller structure of the driver
1847 * @xfer: Pointer to the transfer being timestamped
1848 * @progress: How many words (not bytes) have been transferred so far
1849 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1850 * transfer, for less jitter in time measurement. Only compatible
1851 * with PIO drivers. If true, must follow up with
1852 * spi_take_timestamp_post or otherwise system will crash.
1853 * WARNING: for fully predictable results, the CPU frequency must
1854 * also be under control (governor).
1855 *
1856 * This is a helper for drivers to collect the beginning of the TX timestamp
1857 * for the requested byte from the SPI transfer. The frequency with which this
1858 * function must be called (once per word, once for the whole transfer, once
1859 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1860 * greater than or equal to the requested byte at the time of the call. The
1861 * timestamp is only taken once, at the first such call. It is assumed that
1862 * the driver advances its @tx buffer pointer monotonically.
1863 */
1864void spi_take_timestamp_pre(struct spi_controller *ctlr,
1865 struct spi_transfer *xfer,
1866 size_t progress, bool irqs_off)
1867{
1868 if (!xfer->ptp_sts)
1869 return;
1870
1871 if (xfer->timestamped)
1872 return;
1873
1874 if (progress > xfer->ptp_sts_word_pre)
1875 return;
1876
1877 /* Capture the resolution of the timestamp */
1878 xfer->ptp_sts_word_pre = progress;
1879
1880 if (irqs_off) {
1881 local_irq_save(ctlr->irq_flags);
1882 preempt_disable();
1883 }
1884
1885 ptp_read_system_prets(xfer->ptp_sts);
1886}
1887EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1888
1889/**
1890 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1891 * @ctlr: Pointer to the spi_controller structure of the driver
1892 * @xfer: Pointer to the transfer being timestamped
1893 * @progress: How many words (not bytes) have been transferred so far
1894 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1895 *
1896 * This is a helper for drivers to collect the end of the TX timestamp for
1897 * the requested byte from the SPI transfer. Can be called with an arbitrary
1898 * frequency: only the first call where @tx exceeds or is equal to the
1899 * requested word will be timestamped.
1900 */
1901void spi_take_timestamp_post(struct spi_controller *ctlr,
1902 struct spi_transfer *xfer,
1903 size_t progress, bool irqs_off)
1904{
1905 if (!xfer->ptp_sts)
1906 return;
1907
1908 if (xfer->timestamped)
1909 return;
1910
1911 if (progress < xfer->ptp_sts_word_post)
1912 return;
1913
1914 ptp_read_system_postts(xfer->ptp_sts);
1915
1916 if (irqs_off) {
1917 local_irq_restore(ctlr->irq_flags);
1918 preempt_enable();
1919 }
1920
1921 /* Capture the resolution of the timestamp */
1922 xfer->ptp_sts_word_post = progress;
1923
1924 xfer->timestamped = true;
1925}
1926EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
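
/*
 * Illustrative sketch, not part of this file: a PIO driver wanting TX
 * timestamps could bracket its transmit loop with the two helpers above
 * (assuming 8 bits per word; "foo_write_byte" and "priv" are hypothetical
 * driver names):
 *
 *	for (i = 0; i < xfer->len; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_byte(priv, ((const u8 *)xfer->tx_buf)[i]);
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 */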
1927
1928/**
1929 * spi_set_thread_rt - set the controller to pump at realtime priority
1930 * @ctlr: controller to boost priority of
1931 *
1932 * This can be called because the controller requested realtime priority
1933 * (by setting the ->rt value before calling spi_register_controller()) or
1934 * because a device on the bus said that its transfers needed realtime
1935 * priority.
1936 *
1937 * NOTE: at the moment if any device on a bus says it needs realtime then
1938 * the thread will be at realtime priority for all transfers on that
1939 * controller. If this eventually becomes a problem we may see if we can
1940 * find a way to boost the priority only temporarily during relevant
1941 * transfers.
1942 */
1943static void spi_set_thread_rt(struct spi_controller *ctlr)
1944{
1945 dev_info(&ctlr->dev,
1946 "will run message pump with realtime priority\n");
1947 sched_set_fifo(ctlr->kworker->task);
1948}
1949
1950static int spi_init_queue(struct spi_controller *ctlr)
1951{
1952 ctlr->running = false;
1953 ctlr->busy = false;
1954 ctlr->queue_empty = true;
1955
1956 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1957 if (IS_ERR(ctlr->kworker)) {
1958 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1959 return PTR_ERR(ctlr->kworker);
1960 }
1961
1962 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1963
1964 /*
1965 * Controller config will indicate if this controller should run the
1966 * message pump with high (realtime) priority to reduce the transfer
1967 * latency on the bus by minimising the delay between a transfer
1968 * request and the scheduling of the message pump thread. Without this
1969 * setting the message pump thread will remain at default priority.
1970 */
1971 if (ctlr->rt)
1972 spi_set_thread_rt(ctlr);
1973
1974 return 0;
1975}
1976
1977/**
1978 * spi_get_next_queued_message() - called by driver to check for queued
1979 * messages
1980 * @ctlr: the controller to check for queued messages
1981 *
1982 * If there are more messages in the queue, the next message is returned from
1983 * this call.
1984 *
1985 * Return: the next message in the queue, else NULL if the queue is empty.
1986 */
1987struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1988{
1989 struct spi_message *next;
1990 unsigned long flags;
1991
1992 /* Get a pointer to the next message, if any */
1993 spin_lock_irqsave(&ctlr->queue_lock, flags);
1994 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1995 queue);
1996 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1997
1998 return next;
1999}
2000EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
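
/*
 * Illustrative sketch, not part of this file: a driver implementing
 * transfer_one_message() could peek at the queue to decide whether to keep
 * its hardware powered between messages ("foo_power_down" and "priv" are
 * hypothetical):
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_power_down(priv);
 */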
2001
2002/**
2003 * spi_finalize_current_message() - the current message is complete
2004 * @ctlr: the controller to return the message to
2005 *
2006 * Called by the driver to notify the core that the message in the front of the
2007 * queue is complete and can be removed from the queue.
2008 */
2009void spi_finalize_current_message(struct spi_controller *ctlr)
2010{
2011 struct spi_transfer *xfer;
2012 struct spi_message *mesg;
2013 int ret;
2014
2015 mesg = ctlr->cur_msg;
2016
2017 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2018 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2019 ptp_read_system_postts(xfer->ptp_sts);
2020 xfer->ptp_sts_word_post = xfer->len;
2021 }
2022 }
2023
2024 if (unlikely(ctlr->ptp_sts_supported))
2025 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2026 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2027
2028 spi_unmap_msg(ctlr, mesg);
2029
2030 /*
	 * In the prepare_message callback the SPI bus has the opportunity
	 * to split a transfer into smaller chunks.
2033 *
2034 * Release the split transfers here since spi_map_msg() is done on
2035 * the split transfers.
2036 */
2037 spi_res_release(ctlr, mesg);
2038
2039 if (mesg->prepared && ctlr->unprepare_message) {
2040 ret = ctlr->unprepare_message(ctlr, mesg);
2041 if (ret) {
2042 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2043 ret);
2044 }
2045 }
2046
2047 mesg->prepared = false;
2048
2049 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2050 smp_mb(); /* See __spi_pump_transfer_message()... */
2051 if (READ_ONCE(ctlr->cur_msg_need_completion))
2052 complete(&ctlr->cur_msg_completion);
2053
2054 trace_spi_message_done(mesg);
2055
2056 mesg->state = NULL;
2057 if (mesg->complete)
2058 mesg->complete(mesg->context);
2059}
2060EXPORT_SYMBOL_GPL(spi_finalize_current_message);
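
/*
 * Illustrative sketch, not part of this file: a driver that finishes
 * messages from its completion interrupt would typically report the status
 * and hand the message back like this ("foo_xfer_status" and "priv" are
 * hypothetical):
 *
 *	ctlr->cur_msg->status = foo_xfer_status(priv);
 *	spi_finalize_current_message(ctlr);
 */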
2061
2062static int spi_start_queue(struct spi_controller *ctlr)
2063{
2064 unsigned long flags;
2065
2066 spin_lock_irqsave(&ctlr->queue_lock, flags);
2067
2068 if (ctlr->running || ctlr->busy) {
2069 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2070 return -EBUSY;
2071 }
2072
2073 ctlr->running = true;
2074 ctlr->cur_msg = NULL;
2075 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2076
2077 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2078
2079 return 0;
2080}
2081
2082static int spi_stop_queue(struct spi_controller *ctlr)
2083{
2084 unsigned long flags;
	unsigned int limit = 500;
2086 int ret = 0;
2087
2088 spin_lock_irqsave(&ctlr->queue_lock, flags);
2089
2090 /*
2091 * This is a bit lame, but is optimized for the common execution path.
2092 * A wait_queue on the ctlr->busy could be used, but then the common
2093 * execution path (pump_messages) would be required to call wake_up or
2094 * friends on every SPI message. Do this instead.
2095 */
2096 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2097 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2098 usleep_range(10000, 11000);
2099 spin_lock_irqsave(&ctlr->queue_lock, flags);
2100 }
2101
2102 if (!list_empty(&ctlr->queue) || ctlr->busy)
2103 ret = -EBUSY;
2104 else
2105 ctlr->running = false;
2106
2107 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2108
	if (ret)
		dev_warn(&ctlr->dev, "could not stop message queue\n");
	return ret;
2114}
2115
2116static int spi_destroy_queue(struct spi_controller *ctlr)
2117{
2118 int ret;
2119
2120 ret = spi_stop_queue(ctlr);
2121
2122 /*
2123 * kthread_flush_worker will block until all work is done.
2124 * If the reason that stop_queue timed out is that the work will never
2125 * finish, then it does no good to call flush/stop thread, so
2126 * return anyway.
2127 */
2128 if (ret) {
2129 dev_err(&ctlr->dev, "problem destroying queue\n");
2130 return ret;
2131 }
2132
2133 kthread_destroy_worker(ctlr->kworker);
2134
2135 return 0;
2136}
2137
2138static int __spi_queued_transfer(struct spi_device *spi,
2139 struct spi_message *msg,
2140 bool need_pump)
2141{
2142 struct spi_controller *ctlr = spi->controller;
2143 unsigned long flags;
2144
2145 spin_lock_irqsave(&ctlr->queue_lock, flags);
2146
2147 if (!ctlr->running) {
2148 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2149 return -ESHUTDOWN;
2150 }
2151 msg->actual_length = 0;
2152 msg->status = -EINPROGRESS;
2153
2154 list_add_tail(&msg->queue, &ctlr->queue);
2155 ctlr->queue_empty = false;
2156 if (!ctlr->busy && need_pump)
2157 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2158
2159 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2160 return 0;
2161}
2162
2163/**
2164 * spi_queued_transfer - transfer function for queued transfers
2165 * @spi: spi device which is requesting transfer
 * @msg: SPI message to be handled; it is queued onto the driver queue
2167 *
2168 * Return: zero on success, else a negative error code.
2169 */
2170static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2171{
2172 return __spi_queued_transfer(spi, msg, true);
2173}
2174
2175static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2176{
2177 int ret;
2178
2179 ctlr->transfer = spi_queued_transfer;
2180 if (!ctlr->transfer_one_message)
2181 ctlr->transfer_one_message = spi_transfer_one_message;
2182
2183 /* Initialize and start queue */
2184 ret = spi_init_queue(ctlr);
2185 if (ret) {
2186 dev_err(&ctlr->dev, "problem initializing queue\n");
2187 goto err_init_queue;
2188 }
2189 ctlr->queued = true;
2190 ret = spi_start_queue(ctlr);
2191 if (ret) {
2192 dev_err(&ctlr->dev, "problem starting queue\n");
2193 goto err_start_queue;
2194 }
2195
2196 return 0;
2197
2198err_start_queue:
2199 spi_destroy_queue(ctlr);
2200err_init_queue:
2201 return ret;
2202}
2203
2204/**
 * spi_flush_queue - Send all pending messages in the queue from the caller's
 * context
2207 * @ctlr: controller to process queue for
2208 *
2209 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. It is used by the spi-mem code to make sure SPI
2211 * memory operations do not preempt regular SPI transfers that have been queued
2212 * before the spi-mem operation.
2213 */
2214void spi_flush_queue(struct spi_controller *ctlr)
2215{
2216 if (ctlr->transfer == spi_queued_transfer)
2217 __spi_pump_messages(ctlr, false);
2218}
2219
2220/*-------------------------------------------------------------------------*/
2221
2222#if defined(CONFIG_OF)
2223static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2224 struct spi_delay *delay, const char *prop)
2225{
2226 u32 value;
2227
2228 if (!of_property_read_u32(nc, prop, &value)) {
2229 if (value > U16_MAX) {
2230 delay->value = DIV_ROUND_UP(value, 1000);
2231 delay->unit = SPI_DELAY_UNIT_USECS;
2232 } else {
2233 delay->value = value;
2234 delay->unit = SPI_DELAY_UNIT_NSECS;
2235 }
2236 }
2237}
2238
2239static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2240 struct device_node *nc)
2241{
2242 u32 value;
2243 int rc;
2244
2245 /* Mode (clock phase/polarity/etc.) */
2246 if (of_property_read_bool(nc, "spi-cpha"))
2247 spi->mode |= SPI_CPHA;
2248 if (of_property_read_bool(nc, "spi-cpol"))
2249 spi->mode |= SPI_CPOL;
2250 if (of_property_read_bool(nc, "spi-3wire"))
2251 spi->mode |= SPI_3WIRE;
2252 if (of_property_read_bool(nc, "spi-lsb-first"))
2253 spi->mode |= SPI_LSB_FIRST;
2254 if (of_property_read_bool(nc, "spi-cs-high"))
2255 spi->mode |= SPI_CS_HIGH;
2256
2257 /* Device DUAL/QUAD mode */
2258 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2259 switch (value) {
2260 case 0:
2261 spi->mode |= SPI_NO_TX;
2262 break;
2263 case 1:
2264 break;
2265 case 2:
2266 spi->mode |= SPI_TX_DUAL;
2267 break;
2268 case 4:
2269 spi->mode |= SPI_TX_QUAD;
2270 break;
2271 case 8:
2272 spi->mode |= SPI_TX_OCTAL;
2273 break;
2274 default:
2275 dev_warn(&ctlr->dev,
2276 "spi-tx-bus-width %d not supported\n",
2277 value);
2278 break;
2279 }
2280 }
2281
2282 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2283 switch (value) {
2284 case 0:
2285 spi->mode |= SPI_NO_RX;
2286 break;
2287 case 1:
2288 break;
2289 case 2:
2290 spi->mode |= SPI_RX_DUAL;
2291 break;
2292 case 4:
2293 spi->mode |= SPI_RX_QUAD;
2294 break;
2295 case 8:
2296 spi->mode |= SPI_RX_OCTAL;
2297 break;
2298 default:
2299 dev_warn(&ctlr->dev,
2300 "spi-rx-bus-width %d not supported\n",
2301 value);
2302 break;
2303 }
2304 }
2305
2306 if (spi_controller_is_slave(ctlr)) {
2307 if (!of_node_name_eq(nc, "slave")) {
2308 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2309 nc);
2310 return -EINVAL;
2311 }
2312 return 0;
2313 }
2314
2315 /* Device address */
2316 rc = of_property_read_u32(nc, "reg", &value);
2317 if (rc) {
2318 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2319 nc, rc);
2320 return rc;
2321 }
2322 spi->chip_select = value;
2323
2324 /* Device speed */
2325 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2326 spi->max_speed_hz = value;
2327
2328 /* Device CS delays */
2329 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2330
2331 return 0;
2332}
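
/*
 * Illustrative device tree fragment (a hypothetical device) exercising the
 * properties parsed above; note that a "spi-cs-setup-delay-ns" value larger
 * than U16_MAX is rounded up and stored in microseconds instead:
 *
 *	flash@0 {
 *		compatible = "vendor,chip";
 *		reg = <0>;
 *		spi-max-frequency = <12000000>;
 *		spi-cpha;
 *		spi-tx-bus-width = <4>;
 *		spi-rx-bus-width = <4>;
 *		spi-cs-setup-delay-ns = <100>;
 *	};
 */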
2333
2334static struct spi_device *
2335of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2336{
2337 struct spi_device *spi;
2338 int rc;
2339
2340 /* Alloc an spi_device */
2341 spi = spi_alloc_device(ctlr);
2342 if (!spi) {
2343 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2344 rc = -ENOMEM;
2345 goto err_out;
2346 }
2347
2348 /* Select device driver */
2349 rc = of_modalias_node(nc, spi->modalias,
2350 sizeof(spi->modalias));
2351 if (rc < 0) {
2352 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2353 goto err_out;
2354 }
2355
2356 rc = of_spi_parse_dt(ctlr, spi, nc);
2357 if (rc)
2358 goto err_out;
2359
2360 /* Store a pointer to the node in the device structure */
2361 of_node_get(nc);
2362 spi->dev.of_node = nc;
2363 spi->dev.fwnode = of_fwnode_handle(nc);
2364
2365 /* Register the new device */
2366 rc = spi_add_device(spi);
2367 if (rc) {
2368 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2369 goto err_of_node_put;
2370 }
2371
2372 return spi;
2373
2374err_of_node_put:
2375 of_node_put(nc);
2376err_out:
2377 spi_dev_put(spi);
2378 return ERR_PTR(rc);
2379}
2380
2381/**
2382 * of_register_spi_devices() - Register child devices onto the SPI bus
2383 * @ctlr: Pointer to spi_controller device
2384 *
2385 * Registers an spi_device for each child node of controller node which
2386 * represents a valid SPI slave.
2387 */
2388static void of_register_spi_devices(struct spi_controller *ctlr)
2389{
2390 struct spi_device *spi;
2391 struct device_node *nc;
2392
2393 if (!ctlr->dev.of_node)
2394 return;
2395
2396 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2397 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2398 continue;
2399 spi = of_register_spi_device(ctlr, nc);
2400 if (IS_ERR(spi)) {
2401 dev_warn(&ctlr->dev,
2402 "Failed to create SPI device for %pOF\n", nc);
2403 of_node_clear_flag(nc, OF_POPULATED);
2404 }
2405 }
2406}
2407#else
2408static void of_register_spi_devices(struct spi_controller *ctlr) { }
2409#endif
2410
2411/**
2412 * spi_new_ancillary_device() - Register ancillary SPI device
2413 * @spi: Pointer to the main SPI device registering the ancillary device
2414 * @chip_select: Chip Select of the ancillary device
2415 *
 * Register an ancillary SPI device; for example, some chips have a chip-select
2417 * for normal device usage and another one for setup/firmware upload.
2418 *
 * This may only be called from the main SPI device's probe routine.
2420 *
2421 * Return: 0 on success; negative errno on failure
2422 */
2423struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2424 u8 chip_select)
2425{
2426 struct spi_device *ancillary;
2427 int rc = 0;
2428
2429 /* Alloc an spi_device */
2430 ancillary = spi_alloc_device(spi->controller);
2431 if (!ancillary) {
2432 rc = -ENOMEM;
2433 goto err_out;
2434 }
2435
2436 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2437
2438 /* Use provided chip-select for ancillary device */
2439 ancillary->chip_select = chip_select;
2440
2441 /* Take over SPI mode/speed from SPI main device */
2442 ancillary->max_speed_hz = spi->max_speed_hz;
2443 ancillary->mode = spi->mode;
2444
2445 /* Register the new device */
2446 rc = spi_add_device_locked(ancillary);
2447 if (rc) {
2448 dev_err(&spi->dev, "failed to register ancillary device\n");
2449 goto err_out;
2450 }
2451
2452 return ancillary;
2453
2454err_out:
2455 spi_dev_put(ancillary);
2456 return ERR_PTR(rc);
2457}
2458EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
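
/*
 * Illustrative sketch, not part of this file: from the probe routine of the
 * main device's driver, a second chip select could be claimed like this
 * ("FOO_CS_FIRMWARE" is a hypothetical chip-select number):
 *
 *	ancillary = spi_new_ancillary_device(spi, FOO_CS_FIRMWARE);
 *	if (IS_ERR(ancillary))
 *		return PTR_ERR(ancillary);
 */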
2459
2460#ifdef CONFIG_ACPI
2461struct acpi_spi_lookup {
2462 struct spi_controller *ctlr;
2463 u32 max_speed_hz;
2464 u32 mode;
2465 int irq;
2466 u8 bits_per_word;
2467 u8 chip_select;
2468 int n;
2469 int index;
2470};
2471
2472static int acpi_spi_count(struct acpi_resource *ares, void *data)
2473{
2474 struct acpi_resource_spi_serialbus *sb;
2475 int *count = data;
2476
2477 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2478 return 1;
2479
2480 sb = &ares->data.spi_serial_bus;
2481 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2482 return 1;
2483
2484 *count = *count + 1;
2485
2486 return 1;
2487}
2488
2489/**
2490 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2491 * @adev: ACPI device
2492 *
 * Return: the number of SpiSerialBus resources in the ACPI device's
 * resource list, or a negative error code.
2495 */
2496int acpi_spi_count_resources(struct acpi_device *adev)
2497{
2498 LIST_HEAD(r);
2499 int count = 0;
2500 int ret;
2501
2502 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2503 if (ret < 0)
2504 return ret;
2505
2506 acpi_dev_free_resource_list(&r);
2507
2508 return count;
2509}
2510EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2511
2512static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2513 struct acpi_spi_lookup *lookup)
2514{
2515 const union acpi_object *obj;
2516
2517 if (!x86_apple_machine)
2518 return;
2519
2520 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2521 && obj->buffer.length >= 4)
2522 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2523
2524 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2525 && obj->buffer.length == 8)
2526 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2527
2528 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2529 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2530 lookup->mode |= SPI_LSB_FIRST;
2531
2532 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2533 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2534 lookup->mode |= SPI_CPOL;
2535
2536 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2537 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2538 lookup->mode |= SPI_CPHA;
2539}
2540
2541static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2542
2543static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2544{
2545 struct acpi_spi_lookup *lookup = data;
2546 struct spi_controller *ctlr = lookup->ctlr;
2547
2548 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2549 struct acpi_resource_spi_serialbus *sb;
2550 acpi_handle parent_handle;
2551 acpi_status status;
2552
2553 sb = &ares->data.spi_serial_bus;
2554 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2556 if (lookup->index != -1 && lookup->n++ != lookup->index)
2557 return 1;
2558
2559 status = acpi_get_handle(NULL,
2560 sb->resource_source.string_ptr,
2561 &parent_handle);
2562
2563 if (ACPI_FAILURE(status))
2564 return -ENODEV;
2565
2566 if (ctlr) {
2567 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2568 return -ENODEV;
2569 } else {
2570 struct acpi_device *adev;
2571
2572 adev = acpi_fetch_acpi_dev(parent_handle);
2573 if (!adev)
2574 return -ENODEV;
2575
2576 ctlr = acpi_spi_find_controller_by_adev(adev);
2577 if (!ctlr)
2578 return -EPROBE_DEFER;
2579
2580 lookup->ctlr = ctlr;
2581 }
2582
2583 /*
2584 * ACPI DeviceSelection numbering is handled by the
2585 * host controller driver in Windows and can vary
2586 * from driver to driver. In Linux we always expect
2587 * 0 .. max - 1 so we need to ask the driver to
2588 * translate between the two schemes.
2589 */
2590 if (ctlr->fw_translate_cs) {
2591 int cs = ctlr->fw_translate_cs(ctlr,
2592 sb->device_selection);
2593 if (cs < 0)
2594 return cs;
2595 lookup->chip_select = cs;
2596 } else {
2597 lookup->chip_select = sb->device_selection;
2598 }
2599
2600 lookup->max_speed_hz = sb->connection_speed;
2601 lookup->bits_per_word = sb->data_bit_length;
2602
2603 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2604 lookup->mode |= SPI_CPHA;
2605 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2606 lookup->mode |= SPI_CPOL;
2607 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2608 lookup->mode |= SPI_CS_HIGH;
2609 }
2610 } else if (lookup->irq < 0) {
2611 struct resource r;
2612
2613 if (acpi_dev_resource_interrupt(ares, 0, &r))
2614 lookup->irq = r.start;
2615 }
2616
2617 /* Always tell the ACPI core to skip this resource */
2618 return 1;
2619}
2620
2621/**
2622 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2623 * @ctlr: controller to which the spi device belongs
2624 * @adev: ACPI Device for the spi device
2625 * @index: Index of the spi resource inside the ACPI Node
2626 *
 * This should be used to allocate a new SPI device from an ACPI node.
2628 * The caller is responsible for calling spi_add_device to register the spi device.
2629 *
 * If ctlr is set to NULL, the controller for the SPI device will be looked up
 * using the resource.
2632 * If index is set to -1, index is not used.
2633 * Note: If index is -1, ctlr must be set.
2634 *
2635 * Return: a pointer to the new device, or ERR_PTR on error.
2636 */
2637struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2638 struct acpi_device *adev,
2639 int index)
2640{
2641 acpi_handle parent_handle = NULL;
2642 struct list_head resource_list;
2643 struct acpi_spi_lookup lookup = {};
2644 struct spi_device *spi;
2645 int ret;
2646
2647 if (!ctlr && index == -1)
2648 return ERR_PTR(-EINVAL);
2649
2650 lookup.ctlr = ctlr;
2651 lookup.irq = -1;
2652 lookup.index = index;
2653 lookup.n = 0;
2654
2655 INIT_LIST_HEAD(&resource_list);
2656 ret = acpi_dev_get_resources(adev, &resource_list,
2657 acpi_spi_add_resource, &lookup);
2658 acpi_dev_free_resource_list(&resource_list);
2659
2660 if (ret < 0)
2661 /* Found SPI in _CRS but it points to another controller */
2662 return ERR_PTR(ret);
2663
2664 if (!lookup.max_speed_hz &&
2665 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2666 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2667 /* Apple does not use _CRS but nested devices for SPI slaves */
2668 acpi_spi_parse_apple_properties(adev, &lookup);
2669 }
2670
2671 if (!lookup.max_speed_hz)
2672 return ERR_PTR(-ENODEV);
2673
2674 spi = spi_alloc_device(lookup.ctlr);
2675 if (!spi) {
2676 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2677 dev_name(&adev->dev));
2678 return ERR_PTR(-ENOMEM);
2679 }
2680
2681 ACPI_COMPANION_SET(&spi->dev, adev);
2682 spi->max_speed_hz = lookup.max_speed_hz;
2683 spi->mode |= lookup.mode;
2684 spi->irq = lookup.irq;
2685 spi->bits_per_word = lookup.bits_per_word;
2686 spi->chip_select = lookup.chip_select;
2687
2688 return spi;
2689}
2690EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
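
/*
 * Illustrative sketch, not part of this file: a caller that knows an ACPI
 * node carries an SPI resource could instantiate the device like this
 * (looking up the controller via the resource by passing a NULL ctlr):
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 0);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */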
2691
2692static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2693 struct acpi_device *adev)
2694{
2695 struct spi_device *spi;
2696
2697 if (acpi_bus_get_status(adev) || !adev->status.present ||
2698 acpi_device_enumerated(adev))
2699 return AE_OK;
2700
2701 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2702 if (IS_ERR(spi)) {
2703 if (PTR_ERR(spi) == -ENOMEM)
2704 return AE_NO_MEMORY;
2705 else
2706 return AE_OK;
2707 }
2708
2709 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2710 sizeof(spi->modalias));
2711
2712 if (spi->irq < 0)
2713 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2714
2715 acpi_device_set_enumerated(adev);
2716
2717 adev->power.flags.ignore_parent = true;
2718 if (spi_add_device(spi)) {
2719 adev->power.flags.ignore_parent = false;
2720 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2721 dev_name(&adev->dev));
2722 spi_dev_put(spi);
2723 }
2724
2725 return AE_OK;
2726}
2727
2728static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2729 void *data, void **return_value)
2730{
2731 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2732 struct spi_controller *ctlr = data;
2733
2734 if (!adev)
2735 return AE_OK;
2736
2737 return acpi_register_spi_device(ctlr, adev);
2738}
2739
2740#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2741
2742static void acpi_register_spi_devices(struct spi_controller *ctlr)
2743{
2744 acpi_status status;
2745 acpi_handle handle;
2746
2747 handle = ACPI_HANDLE(ctlr->dev.parent);
2748 if (!handle)
2749 return;
2750
2751 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2752 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2753 acpi_spi_add_device, NULL, ctlr, NULL);
2754 if (ACPI_FAILURE(status))
2755 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2756}
2757#else
2758static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2759#endif /* CONFIG_ACPI */
2760
2761static void spi_controller_release(struct device *dev)
2762{
2763 struct spi_controller *ctlr;
2764
2765 ctlr = container_of(dev, struct spi_controller, dev);
2766 kfree(ctlr);
2767}
2768
2769static struct class spi_master_class = {
2770 .name = "spi_master",
2771 .owner = THIS_MODULE,
2772 .dev_release = spi_controller_release,
2773 .dev_groups = spi_master_groups,
2774};
2775
2776#ifdef CONFIG_SPI_SLAVE
2777/**
2778 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2779 * controller
2780 * @spi: device used for the current transfer
 *
 * Return: zero on success, else a negative error code.
 */
2782int spi_slave_abort(struct spi_device *spi)
2783{
2784 struct spi_controller *ctlr = spi->controller;
2785
2786 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2787 return ctlr->slave_abort(ctlr);
2788
2789 return -ENOTSUPP;
2790}
2791EXPORT_SYMBOL_GPL(spi_slave_abort);
2792
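/**
 * spi_target_abort - abort the ongoing transfer request on an SPI target
 *		      controller
 * @spi: device used for the current transfer
 *
 * Return: zero on success, else a negative error code.
 */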
2793int spi_target_abort(struct spi_device *spi)
2794{
2795 struct spi_controller *ctlr = spi->controller;
2796
2797 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2798 return ctlr->target_abort(ctlr);
2799
2800 return -ENOTSUPP;
2801}
2802EXPORT_SYMBOL_GPL(spi_target_abort);
2803
2804static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2805 char *buf)
2806{
2807 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2808 dev);
2809 struct device *child;
2810
2811 child = device_find_any_child(&ctlr->dev);
2812 return sprintf(buf, "%s\n",
2813 child ? to_spi_device(child)->modalias : NULL);
2814}
2815
2816static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2817 const char *buf, size_t count)
2818{
2819 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2820 dev);
2821 struct spi_device *spi;
2822 struct device *child;
2823 char name[32];
2824 int rc;
2825
2826 rc = sscanf(buf, "%31s", name);
2827 if (rc != 1 || !name[0])
2828 return -EINVAL;
2829
2830 child = device_find_any_child(&ctlr->dev);
2831 if (child) {
2832 /* Remove registered slave */
2833 device_unregister(child);
2834 put_device(child);
2835 }
2836
2837 if (strcmp(name, "(null)")) {
2838 /* Register new slave */
2839 spi = spi_alloc_device(ctlr);
2840 if (!spi)
2841 return -ENOMEM;
2842
2843 strscpy(spi->modalias, name, sizeof(spi->modalias));
2844
2845 rc = spi_add_device(spi);
2846 if (rc) {
2847 spi_dev_put(spi);
2848 return rc;
2849 }
2850 }
2851
2852 return count;
2853}
2854
2855static DEVICE_ATTR_RW(slave);
2856
2857static struct attribute *spi_slave_attrs[] = {
2858 &dev_attr_slave.attr,
2859 NULL,
2860};
2861
2862static const struct attribute_group spi_slave_group = {
2863 .attrs = spi_slave_attrs,
2864};
2865
2866static const struct attribute_group *spi_slave_groups[] = {
2867 &spi_controller_statistics_group,
2868 &spi_slave_group,
2869 NULL,
2870};
2871
2872static struct class spi_slave_class = {
2873 .name = "spi_slave",
2874 .owner = THIS_MODULE,
2875 .dev_release = spi_controller_release,
2876 .dev_groups = spi_slave_groups,
2877};
2878#else
2879extern struct class spi_slave_class; /* dummy */
2880#endif
2881
2882/**
2883 * __spi_alloc_controller - allocate an SPI master or slave controller
2884 * @dev: the controller, possibly using the platform_bus
2885 * @size: how much zeroed driver-private data to allocate; the pointer to this
2886 * memory is in the driver_data field of the returned device, accessible
2887 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2888 * drivers granting DMA access to portions of their private data need to
2889 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2890 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2891 * slave (true) controller
2892 * Context: can sleep
2893 *
2894 * This call is used only by SPI controller drivers, which are the
2895 * only ones directly touching chip registers. It's how they allocate
2896 * an spi_controller structure, prior to calling spi_register_controller().
2897 *
2898 * This must be called from context that can sleep.
2899 *
2900 * The caller is responsible for assigning the bus number and initializing the
2901 * controller's methods before calling spi_register_controller(); and (after
2902 * errors adding the device) calling spi_controller_put() to prevent a memory
2903 * leak.
2904 *
2905 * Return: the SPI controller structure on success, else NULL.
2906 */
2907struct spi_controller *__spi_alloc_controller(struct device *dev,
2908 unsigned int size, bool slave)
2909{
2910 struct spi_controller *ctlr;
2911 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2912
2913 if (!dev)
2914 return NULL;
2915
2916 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2917 if (!ctlr)
2918 return NULL;
2919
2920 device_initialize(&ctlr->dev);
2921 INIT_LIST_HEAD(&ctlr->queue);
2922 spin_lock_init(&ctlr->queue_lock);
2923 spin_lock_init(&ctlr->bus_lock_spinlock);
2924 mutex_init(&ctlr->bus_lock_mutex);
2925 mutex_init(&ctlr->io_mutex);
2926 mutex_init(&ctlr->add_lock);
2927 ctlr->bus_num = -1;
2928 ctlr->num_chipselect = 1;
2929 ctlr->slave = slave;
2930 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2931 ctlr->dev.class = &spi_slave_class;
2932 else
2933 ctlr->dev.class = &spi_master_class;
2934 ctlr->dev.parent = dev;
2935 pm_suspend_ignore_children(&ctlr->dev, true);
2936 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2937
2938 return ctlr;
2939}
2940EXPORT_SYMBOL_GPL(__spi_alloc_controller);
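
/*
 * Illustrative sketch, not part of this file: controller drivers normally
 * reach this function through the spi_alloc_master()/spi_alloc_slave()
 * wrappers ("struct foo_priv" is a hypothetical driver-private structure):
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 */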
2941
2942static void devm_spi_release_controller(struct device *dev, void *ctlr)
2943{
2944 spi_controller_put(*(struct spi_controller **)ctlr);
2945}
2946
2947/**
2948 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2949 * @dev: physical device of SPI controller
2950 * @size: how much zeroed driver-private data to allocate
2951 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2952 * Context: can sleep
2953 *
2954 * Allocate an SPI controller and automatically release a reference on it
2955 * when @dev is unbound from its driver. Drivers are thus relieved from
2956 * having to call spi_controller_put().
2957 *
2958 * The arguments to this function are identical to __spi_alloc_controller().
2959 *
2960 * Return: the SPI controller structure on success, else NULL.
2961 */
2962struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2963 unsigned int size,
2964 bool slave)
2965{
2966 struct spi_controller **ptr, *ctlr;
2967
2968 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2969 GFP_KERNEL);
2970 if (!ptr)
2971 return NULL;
2972
2973 ctlr = __spi_alloc_controller(dev, size, slave);
2974 if (ctlr) {
2975 ctlr->devm_allocated = true;
2976 *ptr = ctlr;
2977 devres_add(dev, ptr);
2978 } else {
2979 devres_free(ptr);
2980 }
2981
2982 return ctlr;
2983}
2984EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
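
/*
 * Illustrative sketch, not part of this file: the devm wrappers remove the
 * need for an explicit spi_controller_put() on driver error paths
 * ("struct foo_priv" is again hypothetical):
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 */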
2985
2986/**
2987 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2988 * @ctlr: The SPI master to grab GPIO descriptors for
2989 */
2990static int spi_get_gpio_descs(struct spi_controller *ctlr)
2991{
2992 int nb, i;
2993 struct gpio_desc **cs;
2994 struct device *dev = &ctlr->dev;
2995 unsigned long native_cs_mask = 0;
2996 unsigned int num_cs_gpios = 0;
2997
2998 nb = gpiod_count(dev, "cs");
2999 if (nb < 0) {
3000 /* No GPIOs at all is fine, else return the error */
3001 if (nb == -ENOENT)
3002 return 0;
3003 return nb;
3004 }
3005
3006 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3007
3008 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3009 GFP_KERNEL);
3010 if (!cs)
3011 return -ENOMEM;
3012 ctlr->cs_gpiods = cs;
3013
3014 for (i = 0; i < nb; i++) {
3015 /*
3016 * Most chipselects are active low, the inverted
3017 * semantics are handled by special quirks in gpiolib,
3018 * so initializing them GPIOD_OUT_LOW here means
3019 * "unasserted", in most cases this will drive the physical
3020 * line high.
3021 */
3022 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3023 GPIOD_OUT_LOW);
3024 if (IS_ERR(cs[i]))
3025 return PTR_ERR(cs[i]);
3026
3027 if (cs[i]) {
3028 /*
3029 * If we find a CS GPIO, name it after the device and
3030 * chip select line.
3031 */
3032 char *gpioname;
3033
3034 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3035 dev_name(dev), i);
3036 if (!gpioname)
3037 return -ENOMEM;
3038 gpiod_set_consumer_name(cs[i], gpioname);
3039 num_cs_gpios++;
3040 continue;
3041 }
3042
3043 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3044 dev_err(dev, "Invalid native chip select %d\n", i);
3045 return -EINVAL;
3046 }
3047 native_cs_mask |= BIT(i);
3048 }
3049
3050 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3051
3052 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
3053 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3054 dev_err(dev, "No unused native chip select available\n");
3055 return -EINVAL;
3056 }
3057
3058 return 0;
3059}
3060
3061static int spi_controller_check_ops(struct spi_controller *ctlr)
3062{
3063 /*
3064 * The controller may implement only the high-level SPI-memory like
	 * operations if it does not support regular SPI transfers, and this is
	 * a valid use case.
	 * If ->mem_ops is NULL, we request that at least one of the
	 * ->transfer_xxx() methods be implemented.
3069 */
3070 if (ctlr->mem_ops) {
3071 if (!ctlr->mem_ops->exec_op)
3072 return -EINVAL;
3073 } else if (!ctlr->transfer && !ctlr->transfer_one &&
3074 !ctlr->transfer_one_message) {
3075 return -EINVAL;
3076 }
3077
3078 return 0;
3079}
3080
3081/**
3082 * spi_register_controller - register SPI master or slave controller
3083 * @ctlr: initialized master, originally from spi_alloc_master() or
3084 * spi_alloc_slave()
3085 * Context: can sleep
3086 *
3087 * SPI controllers connect to their drivers using some non-SPI bus,
3088 * such as the platform bus. The final stage of probe() in that code
3089 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3090 *
3091 * SPI controllers use board specific (often SOC specific) bus numbers,
3092 * and board-specific addressing for SPI devices combines those numbers
3093 * with chip select numbers. Since SPI does not directly support dynamic
3094 * device identification, boards need configuration tables telling which
3095 * chip is at which address.
3096 *
3097 * This must be called from context that can sleep. It returns zero on
3098 * success, else a negative error code (dropping the controller's refcount).
3099 * After a successful return, the caller is responsible for calling
3100 * spi_unregister_controller().
3101 *
3102 * Return: zero on success, else a negative error code.
3103 */
3104int spi_register_controller(struct spi_controller *ctlr)
3105{
3106 struct device *dev = ctlr->dev.parent;
3107 struct boardinfo *bi;
3108 int status;
3109 int id, first_dynamic;
3110
3111 if (!dev)
3112 return -ENODEV;
3113
3114 /*
3115 * Make sure all necessary hooks are implemented before registering
3116 * the SPI controller.
3117 */
3118 status = spi_controller_check_ops(ctlr);
3119 if (status)
3120 return status;
3121
3122 if (ctlr->bus_num >= 0) {
		/* Devices with a fixed bus number must register with that number */
3124 mutex_lock(&board_lock);
3125 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3126 ctlr->bus_num + 1, GFP_KERNEL);
3127 mutex_unlock(&board_lock);
3128 if (WARN(id < 0, "couldn't get idr"))
3129 return id == -ENOSPC ? -EBUSY : id;
3130 ctlr->bus_num = id;
3131 } else if (ctlr->dev.of_node) {
3132 /* Allocate dynamic bus number using Linux idr */
3133 id = of_alias_get_id(ctlr->dev.of_node, "spi");
3134 if (id >= 0) {
3135 ctlr->bus_num = id;
3136 mutex_lock(&board_lock);
3137 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3138 ctlr->bus_num + 1, GFP_KERNEL);
3139 mutex_unlock(&board_lock);
3140 if (WARN(id < 0, "couldn't get idr"))
3141 return id == -ENOSPC ? -EBUSY : id;
3142 }
3143 }
3144 if (ctlr->bus_num < 0) {
3145 first_dynamic = of_alias_get_highest_id("spi");
3146 if (first_dynamic < 0)
3147 first_dynamic = 0;
3148 else
3149 first_dynamic++;
3150
3151 mutex_lock(&board_lock);
3152 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
3153 0, GFP_KERNEL);
3154 mutex_unlock(&board_lock);
3155 if (WARN(id < 0, "couldn't get idr"))
3156 return id;
3157 ctlr->bus_num = id;
3158 }
3159 ctlr->bus_lock_flag = 0;
3160 init_completion(&ctlr->xfer_completion);
3161 init_completion(&ctlr->cur_msg_completion);
3162 if (!ctlr->max_dma_len)
3163 ctlr->max_dma_len = INT_MAX;
3164
3165 /*
3166 * Register the device, then userspace will see it.
3167 * Registration fails if the bus ID is in use.
3168 */
3169 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3170
3171 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3172 status = spi_get_gpio_descs(ctlr);
3173 if (status)
3174 goto free_bus_id;
3175 /*
3176 * A controller using GPIO descriptors always
3177 * supports SPI_CS_HIGH if need be.
3178 */
3179 ctlr->mode_bits |= SPI_CS_HIGH;
3180 }
3181
3182 /*
3183 * Even if it's just one always-selected device, there must
3184 * be at least one chipselect.
3185 */
3186 if (!ctlr->num_chipselect) {
3187 status = -EINVAL;
3188 goto free_bus_id;
3189 }
3190
3191 /* Setting last_cs to -1 means no chip selected */
3192 ctlr->last_cs = -1;
3193
3194 status = device_add(&ctlr->dev);
3195 if (status < 0)
3196 goto free_bus_id;
3197 dev_dbg(dev, "registered %s %s\n",
3198 spi_controller_is_slave(ctlr) ? "slave" : "master",
3199 dev_name(&ctlr->dev));
3200
3201 /*
3202 * If we're using a queued driver, start the queue. Note that we don't
3203 * need the queueing logic if the driver is only supporting high-level
3204 * memory operations.
3205 */
3206 if (ctlr->transfer) {
3207 dev_info(dev, "controller is unqueued, this is deprecated\n");
3208 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3209 status = spi_controller_initialize_queue(ctlr);
3210 if (status) {
3211 device_del(&ctlr->dev);
3212 goto free_bus_id;
3213 }
3214 }
3215 /* Add statistics */
3216 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3217 if (!ctlr->pcpu_statistics) {
3218 dev_err(dev, "Error allocating per-cpu statistics\n");
3219 status = -ENOMEM;
3220 goto destroy_queue;
3221 }
3222
3223 mutex_lock(&board_lock);
3224 list_add_tail(&ctlr->list, &spi_controller_list);
3225 list_for_each_entry(bi, &board_list, list)
3226 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3227 mutex_unlock(&board_lock);
3228
3229 /* Register devices from the device tree and ACPI */
3230 of_register_spi_devices(ctlr);
3231 acpi_register_spi_devices(ctlr);
3232 return status;
3233
3234destroy_queue:
3235 spi_destroy_queue(ctlr);
3236free_bus_id:
3237 mutex_lock(&board_lock);
3238 idr_remove(&spi_master_idr, ctlr->bus_num);
3239 mutex_unlock(&board_lock);
3240 return status;
3241}
3242EXPORT_SYMBOL_GPL(spi_register_controller);
3243
3244static void devm_spi_unregister(struct device *dev, void *res)
3245{
3246 spi_unregister_controller(*(struct spi_controller **)res);
3247}
3248
3249/**
3250 * devm_spi_register_controller - register managed SPI master or slave
3251 * controller
3252 * @dev: device managing SPI controller
3253 * @ctlr: initialized controller, originally from spi_alloc_master() or
3254 * spi_alloc_slave()
3255 * Context: can sleep
3256 *
 * Register an SPI controller as with spi_register_controller() which will
3258 * automatically be unregistered and freed.
3259 *
3260 * Return: zero on success, else a negative error code.
3261 */
3262int devm_spi_register_controller(struct device *dev,
3263 struct spi_controller *ctlr)
3264{
3265 struct spi_controller **ptr;
3266 int ret;
3267
3268 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3269 if (!ptr)
3270 return -ENOMEM;
3271
3272 ret = spi_register_controller(ctlr);
3273 if (!ret) {
3274 *ptr = ctlr;
3275 devres_add(dev, ptr);
3276 } else {
3277 devres_free(ptr);
3278 }
3279
3280 return ret;
3281}
3282EXPORT_SYMBOL_GPL(devm_spi_register_controller);
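
/*
 * Illustrative sketch, not part of this file: the tail end of a typical
 * platform driver probe() routine ("foo_transfer_one" and "foo_set_cs" are
 * hypothetical controller hooks):
 *
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->set_cs = foo_set_cs;
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */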
3283
3284static int __unregister(struct device *dev, void *null)
3285{
3286 spi_unregister_device(to_spi_device(dev));
3287 return 0;
3288}
3289
3290/**
3291 * spi_unregister_controller - unregister SPI master or slave controller
3292 * @ctlr: the controller being unregistered
3293 * Context: can sleep
3294 *
3295 * This call is used only by SPI controller drivers, which are the
3296 * only ones directly touching chip registers.
3297 *
3298 * This must be called from context that can sleep.
3299 *
3300 * Note that this function also drops a reference to the controller.
3301 */
3302void spi_unregister_controller(struct spi_controller *ctlr)
3303{
3304 struct spi_controller *found;
3305 int id = ctlr->bus_num;
3306
3307 /* Prevent addition of new devices, unregister existing ones */
3308 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3309 mutex_lock(&ctlr->add_lock);
3310
3311 device_for_each_child(&ctlr->dev, NULL, __unregister);
3312
3313 /* First make sure that this controller was ever added */
3314 mutex_lock(&board_lock);
3315 found = idr_find(&spi_master_idr, id);
3316 mutex_unlock(&board_lock);
3317 if (ctlr->queued) {
3318 if (spi_destroy_queue(ctlr))
3319 dev_err(&ctlr->dev, "queue remove failed\n");
3320 }
3321 mutex_lock(&board_lock);
3322 list_del(&ctlr->list);
3323 mutex_unlock(&board_lock);
3324
3325 device_del(&ctlr->dev);
3326
3327 /* Free bus id */
3328 mutex_lock(&board_lock);
3329 if (found == ctlr)
3330 idr_remove(&spi_master_idr, id);
3331 mutex_unlock(&board_lock);
3332
3333 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3334 mutex_unlock(&ctlr->add_lock);
3335
	/*
	 * Release the last reference on the controller if its driver
	 * has not yet been converted to devm_spi_alloc_master/slave().
	 */
3339 if (!ctlr->devm_allocated)
3340 put_device(&ctlr->dev);
3341}
3342EXPORT_SYMBOL_GPL(spi_unregister_controller);
3343
3344int spi_controller_suspend(struct spi_controller *ctlr)
3345{
3346 int ret;
3347
3348 /* Basically no-ops for non-queued controllers */
3349 if (!ctlr->queued)
3350 return 0;
3351
3352 ret = spi_stop_queue(ctlr);
3353 if (ret)
3354 dev_err(&ctlr->dev, "queue stop failed\n");
3355
3356 return ret;
3357}
3358EXPORT_SYMBOL_GPL(spi_controller_suspend);
3359
3360int spi_controller_resume(struct spi_controller *ctlr)
3361{
3362 int ret;
3363
3364 if (!ctlr->queued)
3365 return 0;
3366
3367 ret = spi_start_queue(ctlr);
3368 if (ret)
3369 dev_err(&ctlr->dev, "queue restart failed\n");
3370
3371 return ret;
3372}
3373EXPORT_SYMBOL_GPL(spi_controller_resume);
3374
3375/*-------------------------------------------------------------------------*/
3376
3377/* Core methods for spi_message alterations */
3378
3379static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3380 struct spi_message *msg,
3381 void *res)
3382{
3383 struct spi_replaced_transfers *rxfer = res;
3384 size_t i;
3385
3386 /* Call extra callback if requested */
3387 if (rxfer->release)
3388 rxfer->release(ctlr, msg, res);
3389
3390 /* Insert replaced transfers back into the message */
3391 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3392
3393 /* Remove the formerly inserted entries */
3394 for (i = 0; i < rxfer->inserted; i++)
3395 list_del(&rxfer->inserted_transfers[i].transfer_list);
3396}
3397
3398/**
3399 * spi_replace_transfers - replace transfers with several transfers
3400 * and register change with spi_message.resources
3401 * @msg: the spi_message we work upon
3402 * @xfer_first: the first spi_transfer we want to replace
3403 * @remove: number of transfers to remove
3404 * @insert: the number of transfers we want to insert instead
3405 * @release: extra release code necessary in some circumstances
3406 * @extradatasize: extra data to allocate (with alignment guarantees
3407 * of struct @spi_transfer)
3408 * @gfp: gfp flags
3409 *
 * Return: pointer to @spi_replaced_transfers,
 *         PTR_ERR(...) in case of errors.
3412 */
3413static struct spi_replaced_transfers *spi_replace_transfers(
3414 struct spi_message *msg,
3415 struct spi_transfer *xfer_first,
3416 size_t remove,
3417 size_t insert,
3418 spi_replaced_release_t release,
3419 size_t extradatasize,
3420 gfp_t gfp)
3421{
3422 struct spi_replaced_transfers *rxfer;
3423 struct spi_transfer *xfer;
3424 size_t i;
3425
3426 /* Allocate the structure using spi_res */
3427 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3428 struct_size(rxfer, inserted_transfers, insert)
3429 + extradatasize,
3430 gfp);
3431 if (!rxfer)
3432 return ERR_PTR(-ENOMEM);
3433
3434 /* The release code to invoke before running the generic release */
3435 rxfer->release = release;
3436
3437 /* Assign extradata */
3438 if (extradatasize)
3439 rxfer->extradata =
3440 &rxfer->inserted_transfers[insert];
3441
3442 /* Init the replaced_transfers list */
3443 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3444
3445 /*
3446 * Assign the list_entry after which we should reinsert
3447 * the @replaced_transfers - it may be spi_message.messages!
3448 */
3449 rxfer->replaced_after = xfer_first->transfer_list.prev;
3450
3451 /* Remove the requested number of transfers */
3452 for (i = 0; i < remove; i++) {
3453 /*
		 * If the entry after replaced_after is msg->transfers
		 * then we have been requested to remove more transfers
3456 * than are in the list.
3457 */
3458 if (rxfer->replaced_after->next == &msg->transfers) {
3459 dev_err(&msg->spi->dev,
3460 "requested to remove more spi_transfers than are available\n");
3461 /* Insert replaced transfers back into the message */
3462 list_splice(&rxfer->replaced_transfers,
3463 rxfer->replaced_after);
3464
3465 /* Free the spi_replace_transfer structure... */
3466 spi_res_free(rxfer);
3467
3468 /* ...and return with an error */
3469 return ERR_PTR(-EINVAL);
3470 }
3471
3472 /*
3473 * Remove the entry after replaced_after from list of
3474 * transfers and add it to list of replaced_transfers.
3475 */
3476 list_move_tail(rxfer->replaced_after->next,
3477 &rxfer->replaced_transfers);
3478 }
3479
3480 /*
3481 * Create copy of the given xfer with identical settings
3482 * based on the first transfer to get removed.
3483 */
3484 for (i = 0; i < insert; i++) {
3485 /* We need to run in reverse order */
3486 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3487
3488 /* Copy all spi_transfer data */
3489 memcpy(xfer, xfer_first, sizeof(*xfer));
3490
3491 /* Add to list */
3492 list_add(&xfer->transfer_list, rxfer->replaced_after);
3493
3494 /* Clear cs_change and delay for all but the last */
3495 if (i) {
3496 xfer->cs_change = false;
3497 xfer->delay.value = 0;
3498 }
3499 }
3500
3501 /* Set up inserted... */
3502 rxfer->inserted = insert;
3503
3504 /* ...and register it with spi_res/spi_message */
3505 spi_res_add(msg, rxfer);
3506
3507 return rxfer;
3508}
3509
3510static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3511 struct spi_message *msg,
3512 struct spi_transfer **xferp,
3513 size_t maxsize,
3514 gfp_t gfp)
3515{
3516 struct spi_transfer *xfer = *xferp, *xfers;
3517 struct spi_replaced_transfers *srt;
3518 size_t offset;
3519 size_t count, i;
3520
3521 /* Calculate how many we have to replace */
3522 count = DIV_ROUND_UP(xfer->len, maxsize);
3523
3524 /* Create replacement */
3525 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3526 if (IS_ERR(srt))
3527 return PTR_ERR(srt);
3528 xfers = srt->inserted_transfers;
3529
3530 /*
3531 * Now handle each of those newly inserted spi_transfers.
3532 * Note that the replacements spi_transfers all are preset
3533 * to the same values as *xferp, so tx_buf, rx_buf and len
3534 * are all identical (as well as most others)
3535 * so we just have to fix up len and the pointers.
3536 *
	 * This also includes support for the deprecated
3538 * spi_message.is_dma_mapped interface.
3539 */
3540
3541 /*
3542 * The first transfer just needs the length modified, so we
3543 * run it outside the loop.
3544 */
3545 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3546
3547 /* All the others need rx_buf/tx_buf also set */
3548 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3549 /* Update rx_buf, tx_buf and dma */
3550 if (xfers[i].rx_buf)
3551 xfers[i].rx_buf += offset;
3552 if (xfers[i].rx_dma)
3553 xfers[i].rx_dma += offset;
3554 if (xfers[i].tx_buf)
3555 xfers[i].tx_buf += offset;
3556 if (xfers[i].tx_dma)
3557 xfers[i].tx_dma += offset;
3558
3559 /* Update length */
3560 xfers[i].len = min(maxsize, xfers[i].len - offset);
3561 }
3562
3563 /*
3564 * We set up xferp to the last entry we have inserted,
3565 * so that we skip those already split transfers.
3566 */
3567 *xferp = &xfers[count - 1];
3568
3569 /* Increment statistics counters */
3570 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3571 transfers_split_maxsize);
3572 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3573 transfers_split_maxsize);
3574
3575 return 0;
3576}
3577
3578/**
3579 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3580 * when an individual transfer exceeds a
3581 * certain size
3582 * @ctlr: the @spi_controller for this transfer
3583 * @msg: the @spi_message to transform
 * @maxsize: the maximum length, in bytes, that an individual transfer may have
3585 * @gfp: GFP allocation flags
3586 *
3587 * Return: status of transformation
3588 */
3589int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3590 struct spi_message *msg,
3591 size_t maxsize,
3592 gfp_t gfp)
3593{
3594 struct spi_transfer *xfer;
3595 int ret;
3596
3597 /*
3598 * Iterate over the transfer_list,
3599 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer may
	 * belong to a different list by the time the
3602 * replacement has happened).
3603 */
3604 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3605 if (xfer->len > maxsize) {
3606 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3607 maxsize, gfp);
3608 if (ret)
3609 return ret;
3610 }
3611 }
3612
3613 return 0;
3614}
3615EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
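
/*
 * Illustrative sketch, not part of this file: a controller driver with a
 * hardware FIFO limit could split oversized transfers from its
 * prepare_message() callback ("FOO_FIFO_LEN" is hypothetical):
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, FOO_FIFO_LEN,
 *						   GFP_KERNEL);
 *	}
 */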
3616
3617/*-------------------------------------------------------------------------*/
3618
/*
 * Core methods for SPI controller protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
 */
3622
3623static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3624 u8 bits_per_word)
3625{
3626 if (ctlr->bits_per_word_mask) {
3627 /* Only 32 bits fit in the mask */
3628 if (bits_per_word > 32)
3629 return -EINVAL;
3630 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3631 return -EINVAL;
3632 }
3633
3634 return 0;
3635}
3636
3637/**
3638 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3639 * @spi: the device that requires specific CS timing configuration
3640 *
3641 * Return: zero on success, else a negative error code.
3642 */
3643static int spi_set_cs_timing(struct spi_device *spi)
3644{
3645 struct device *parent = spi->controller->dev.parent;
3646 int status = 0;
3647
3648 if (spi->controller->set_cs_timing && !spi->cs_gpiod) {
3649 if (spi->controller->auto_runtime_pm) {
3650 status = pm_runtime_get_sync(parent);
3651 if (status < 0) {
3652 pm_runtime_put_noidle(parent);
3653 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3654 status);
3655 return status;
3656 }
3657
3658 status = spi->controller->set_cs_timing(spi);
3659 pm_runtime_mark_last_busy(parent);
3660 pm_runtime_put_autosuspend(parent);
3661 } else {
3662 status = spi->controller->set_cs_timing(spi);
3663 }
3664 }
3665 return status;
3666}
3667
3668/**
3669 * spi_setup - setup SPI mode and clock rate
3670 * @spi: the device whose settings are being modified
3671 * Context: can sleep, and no requests are queued to the device
3672 *
3673 * SPI protocol drivers may need to update the transfer mode if the
3674 * device doesn't work with its default. They may likewise need
3675 * to update clock rates or word sizes from initial values. This function
3676 * changes those settings, and must be called from a context that can sleep.
3677 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3678 * effect the next time the device is selected and data is transferred to
3679 * or from it. When this function returns, the spi device is deselected.
3680 *
3681 * Note that this call will fail if the protocol driver specifies an option
3682 * that the underlying controller or its driver does not support. For
3683 * example, not all hardware supports wire transfers using nine bit words,
3684 * LSB-first wire encoding, or active-high chipselects.
3685 *
3686 * Return: zero on success, else a negative error code.
3687 */
3688int spi_setup(struct spi_device *spi)
3689{
3690 unsigned bad_bits, ugly_bits;
	unsigned int	bad_bits, ugly_bits;
3692
3693 /*
	 * Check the mode to ensure that no two of DUAL, QUAD and NO_MOSI/MISO
	 * are set at the same time.
3696 */
3697 if ((hweight_long(spi->mode &
3698 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3699 (hweight_long(spi->mode &
3700 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3701 dev_err(&spi->dev,
3702 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3703 return -EINVAL;
3704 }
3705 /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3706 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3707 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3708 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3709 return -EINVAL;
3710 /*
3711 * Help drivers fail *cleanly* when they need options
3712 * that aren't supported with their current controller.
3713 * SPI_CS_WORD has a fallback software implementation,
3714 * so it is ignored here.
3715 */
3716 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3717 SPI_NO_TX | SPI_NO_RX);
3718 ugly_bits = bad_bits &
3719 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3720 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3721 if (ugly_bits) {
3722 dev_warn(&spi->dev,
3723 "setup: ignoring unsupported mode bits %x\n",
3724 ugly_bits);
3725 spi->mode &= ~ugly_bits;
3726 bad_bits &= ~ugly_bits;
3727 }
3728 if (bad_bits) {
3729 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3730 bad_bits);
3731 return -EINVAL;
3732 }
3733
3734 if (!spi->bits_per_word) {
3735 spi->bits_per_word = 8;
3736 } else {
3737 /*
3738 * Some controllers may not support the default 8 bits-per-word
3739 * so only perform the check when this is explicitly provided.
3740 */
3741 status = __spi_validate_bits_per_word(spi->controller,
3742 spi->bits_per_word);
3743 if (status)
3744 return status;
3745 }
3746
3747 if (spi->controller->max_speed_hz &&
3748 (!spi->max_speed_hz ||
3749 spi->max_speed_hz > spi->controller->max_speed_hz))
3750 spi->max_speed_hz = spi->controller->max_speed_hz;
3751
3752 mutex_lock(&spi->controller->io_mutex);
3753
3754 if (spi->controller->setup) {
3755 status = spi->controller->setup(spi);
3756 if (status) {
3757 mutex_unlock(&spi->controller->io_mutex);
3758 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3759 status);
3760 return status;
3761 }
3762 }
3763
3764 status = spi_set_cs_timing(spi);
3765 if (status) {
3766 mutex_unlock(&spi->controller->io_mutex);
3767 return status;
3768 }
3769
3770 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3771 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3772 if (status < 0) {
3773 mutex_unlock(&spi->controller->io_mutex);
3774 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3775 status);
3776 return status;
3777 }
3778
3779 /*
3780 * We do not want to return positive value from pm_runtime_get,
3781 * there are many instances of devices calling spi_setup() and
3782 * checking for a non-zero return value instead of a negative
3783 * return value.
3784 */
3785 status = 0;
3786
3787 spi_set_cs(spi, false, true);
3788 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3789 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3790 } else {
3791 spi_set_cs(spi, false, true);
3792 }
3793
3794 mutex_unlock(&spi->controller->io_mutex);
3795
3796 if (spi->rt && !spi->controller->rt) {
3797 spi->controller->rt = true;
3798 spi_set_thread_rt(spi->controller);
3799 }
3800
3801 trace_spi_setup(spi, status);
3802
3803 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3804 spi->mode & SPI_MODE_X_MASK,
3805 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3806 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3807 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3808 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3809 spi->bits_per_word, spi->max_speed_hz,
3810 status);
3811
3812 return status;
3813}
3814EXPORT_SYMBOL_GPL(spi_setup);
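
/*
 * A minimal sketch of how a protocol driver typically uses spi_setup()
 * (illustrative only; the mode, word size and speed values below are
 * hypothetical, not requirements of this API):
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return dev_err_probe(&spi->dev, ret, "spi_setup failed\n");
 */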
3815
3816static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3817 struct spi_device *spi)
3818{
3819 int delay1, delay2;
3820
3821 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3822 if (delay1 < 0)
3823 return delay1;
3824
3825 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3826 if (delay2 < 0)
3827 return delay2;
3828
3829 if (delay1 < delay2)
3830 memcpy(&xfer->word_delay, &spi->word_delay,
3831 sizeof(xfer->word_delay));
3832
3833 return 0;
3834}
3835
3836static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3837{
3838 struct spi_controller *ctlr = spi->controller;
3839 struct spi_transfer *xfer;
3840 int w_size;
3841
3842 if (list_empty(&message->transfers))
3843 return -EINVAL;
3844
3845 /*
3846 * If an SPI controller does not support toggling the CS line on each
3847 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3848 * for the CS line, we can emulate the CS-per-word hardware function by
3849 * splitting transfers into one-word transfers and ensuring that
3850 * cs_change is set for each transfer.
3851 */
3852 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3853 spi->cs_gpiod)) {
3854 size_t maxsize;
3855 int ret;
3856
3857 maxsize = (spi->bits_per_word + 7) / 8;
3858
3859 /* spi_split_transfers_maxsize() requires message->spi */
3860 message->spi = spi;
3861
3862 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3863 GFP_KERNEL);
3864 if (ret)
3865 return ret;
3866
3867 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3868 /* Don't change cs_change on the last entry in the list */
3869 if (list_is_last(&xfer->transfer_list, &message->transfers))
3870 break;
3871 xfer->cs_change = 1;
3872 }
3873 }
3874
3875 /*
3876 * Half-duplex links include original MicroWire, and ones with
3877 * only one data pin like SPI_3WIRE (switches direction) or where
3878 * either MOSI or MISO is missing. They can also be caused by
3879 * software limitations.
3880 */
3881 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3882 (spi->mode & SPI_3WIRE)) {
3883 unsigned flags = ctlr->flags;
3884
3885 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3886 if (xfer->rx_buf && xfer->tx_buf)
3887 return -EINVAL;
3888 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3889 return -EINVAL;
3890 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3891 return -EINVAL;
3892 }
3893 }
3894
3895 /*
3896	 * Set transfer bits_per_word and max speed to the SPI device defaults
3897	 * if they are not set for this transfer.
3898	 * Set transfer tx_nbits and rx_nbits to the single-transfer default
3899	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
3900	 * Ensure transfer word_delay is at least as long as that required by
3901	 * the device itself.
3902 */
3903 message->frame_length = 0;
3904 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3905 xfer->effective_speed_hz = 0;
3906 message->frame_length += xfer->len;
3907 if (!xfer->bits_per_word)
3908 xfer->bits_per_word = spi->bits_per_word;
3909
3910 if (!xfer->speed_hz)
3911 xfer->speed_hz = spi->max_speed_hz;
3912
3913 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3914 xfer->speed_hz = ctlr->max_speed_hz;
3915
3916 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3917 return -EINVAL;
3918
3919 /*
3920		 * The SPI transfer length must be a multiple of the SPI word
3921		 * size, where the word size is rounded up to a power-of-two number of bytes.
3922 */
3923 if (xfer->bits_per_word <= 8)
3924 w_size = 1;
3925 else if (xfer->bits_per_word <= 16)
3926 w_size = 2;
3927 else
3928 w_size = 4;
3929
3930 /* No partial transfers accepted */
3931 if (xfer->len % w_size)
3932 return -EINVAL;
3933
3934 if (xfer->speed_hz && ctlr->min_speed_hz &&
3935 xfer->speed_hz < ctlr->min_speed_hz)
3936 return -EINVAL;
3937
3938 if (xfer->tx_buf && !xfer->tx_nbits)
3939 xfer->tx_nbits = SPI_NBITS_SINGLE;
3940 if (xfer->rx_buf && !xfer->rx_nbits)
3941 xfer->rx_nbits = SPI_NBITS_SINGLE;
3942 /*
3943 * Check transfer tx/rx_nbits:
3944 * 1. check the value matches one of single, dual and quad
3945 * 2. check tx/rx_nbits match the mode in spi_device
3946 */
3947 if (xfer->tx_buf) {
3948 if (spi->mode & SPI_NO_TX)
3949 return -EINVAL;
3950 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3951 xfer->tx_nbits != SPI_NBITS_DUAL &&
3952 xfer->tx_nbits != SPI_NBITS_QUAD)
3953 return -EINVAL;
3954 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3955 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3956 return -EINVAL;
3957 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3958 !(spi->mode & SPI_TX_QUAD))
3959 return -EINVAL;
3960 }
3961 /* Check transfer rx_nbits */
3962 if (xfer->rx_buf) {
3963 if (spi->mode & SPI_NO_RX)
3964 return -EINVAL;
3965 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3966 xfer->rx_nbits != SPI_NBITS_DUAL &&
3967 xfer->rx_nbits != SPI_NBITS_QUAD)
3968 return -EINVAL;
3969 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3970 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3971 return -EINVAL;
3972 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3973 !(spi->mode & SPI_RX_QUAD))
3974 return -EINVAL;
3975 }
3976
3977 if (_spi_xfer_word_delay_update(xfer, spi))
3978 return -EINVAL;
3979 }
3980
3981 message->status = -EINPROGRESS;
3982
3983 return 0;
3984}
3985
3986static int __spi_async(struct spi_device *spi, struct spi_message *message)
3987{
3988 struct spi_controller *ctlr = spi->controller;
3989 struct spi_transfer *xfer;
3990
3991 /*
3992 * Some controllers do not support doing regular SPI transfers. Return
3993 * ENOTSUPP when this is the case.
3994 */
3995 if (!ctlr->transfer)
3996 return -ENOTSUPP;
3997
3998 message->spi = spi;
3999
4000 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4001 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4002
4003 trace_spi_message_submit(message);
4004
4005 if (!ctlr->ptp_sts_supported) {
4006 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4007 xfer->ptp_sts_word_pre = 0;
4008 ptp_read_system_prets(xfer->ptp_sts);
4009 }
4010 }
4011
4012 return ctlr->transfer(spi, message);
4013}
4014
4015/**
4016 * spi_async - asynchronous SPI transfer
4017 * @spi: device with which data will be exchanged
4018 * @message: describes the data transfers, including completion callback
4019 * Context: any (irqs may be blocked, etc)
4020 *
4021 * This call may be used in_irq() and other contexts which can't sleep,
4022 * as well as from task contexts which can sleep.
4023 *
4024 * The completion callback is invoked in a context which can't sleep.
4025 * Before that invocation, the value of message->status is undefined.
4026 * When the callback is issued, message->status holds either zero (to
4027 * indicate complete success) or a negative error code. After that
4028 * callback returns, the driver which issued the transfer request may
4029 * deallocate the associated memory; it's no longer in use by any SPI
4030 * core or controller driver code.
4031 *
4032 * Note that although all messages to a spi_device are handled in
4033 * FIFO order, messages may go to different devices in other orders.
4034 * Some device might be higher priority, or have various "hard" access
4035 * time requirements, for example.
4036 *
4037 * On detection of any fault during the transfer, processing of
4038 * the entire message is aborted, and the device is deselected.
4039 * Until returning from the associated message completion callback,
4040 * no other spi_message queued to that device will be processed.
4041 * (This rule applies equally to all the synchronous transfer calls,
4042 * which are wrappers around this core asynchronous primitive.)
4043 *
4044 * Return: zero on success, else a negative error code.
4045 */
4046int spi_async(struct spi_device *spi, struct spi_message *message)
4047{
4048 struct spi_controller *ctlr = spi->controller;
4049 int ret;
4050 unsigned long flags;
4051
4052 ret = __spi_validate(spi, message);
4053 if (ret != 0)
4054 return ret;
4055
4056 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4057
4058 if (ctlr->bus_lock_flag)
4059 ret = -EBUSY;
4060 else
4061 ret = __spi_async(spi, message);
4062
4063 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4064
4065 return ret;
4066}
4067EXPORT_SYMBOL_GPL(spi_async);
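
/*
 * A sketch of typical spi_async() usage with a completion (illustrative
 * only; "msg", "xfer" and "done" are assumed to stay valid until the
 * completion callback has run):
 *
 *	static void my_complete(void *context)
 *	{
 *		complete(context);	// runs in a context that can't sleep
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	msg.complete = my_complete;
 *	msg.context = &done;
 *	ret = spi_async(spi, &msg);
 *	if (!ret)
 *		wait_for_completion(&done);
 */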
4068
4069/**
4070 * spi_async_locked - version of spi_async with exclusive bus usage
4071 * @spi: device with which data will be exchanged
4072 * @message: describes the data transfers, including completion callback
4073 * Context: any (irqs may be blocked, etc)
4074 *
4075 * This call may be used in_irq() and other contexts which can't sleep,
4076 * as well as from task contexts which can sleep.
4077 *
4078 * The completion callback is invoked in a context which can't sleep.
4079 * Before that invocation, the value of message->status is undefined.
4080 * When the callback is issued, message->status holds either zero (to
4081 * indicate complete success) or a negative error code. After that
4082 * callback returns, the driver which issued the transfer request may
4083 * deallocate the associated memory; it's no longer in use by any SPI
4084 * core or controller driver code.
4085 *
4086 * Note that although all messages to a spi_device are handled in
4087 * FIFO order, messages may go to different devices in other orders.
4088 * Some device might be higher priority, or have various "hard" access
4089 * time requirements, for example.
4090 *
4091 * On detection of any fault during the transfer, processing of
4092 * the entire message is aborted, and the device is deselected.
4093 * Until returning from the associated message completion callback,
4094 * no other spi_message queued to that device will be processed.
4095 * (This rule applies equally to all the synchronous transfer calls,
4096 * which are wrappers around this core asynchronous primitive.)
4097 *
4098 * Return: zero on success, else a negative error code.
4099 */
4100static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
4101{
4102 struct spi_controller *ctlr = spi->controller;
4103 int ret;
4104 unsigned long flags;
4105
4106 ret = __spi_validate(spi, message);
4107 if (ret != 0)
4108 return ret;
4109
4110 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4111
4112 ret = __spi_async(spi, message);
4113
4114 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4115
4116 return ret;
4118}
4119
4120static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4121{
4122 bool was_busy;
4123 int ret;
4124
4125 mutex_lock(&ctlr->io_mutex);
4126
4127 was_busy = ctlr->busy;
4128
4129 ctlr->cur_msg = msg;
4130 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4131 if (ret)
4132 goto out;
4133
4134 ctlr->cur_msg = NULL;
4135 ctlr->fallback = false;
4136
4137 if (!was_busy) {
4138 kfree(ctlr->dummy_rx);
4139 ctlr->dummy_rx = NULL;
4140 kfree(ctlr->dummy_tx);
4141 ctlr->dummy_tx = NULL;
4142 if (ctlr->unprepare_transfer_hardware &&
4143 ctlr->unprepare_transfer_hardware(ctlr))
4144 dev_err(&ctlr->dev,
4145 "failed to unprepare transfer hardware\n");
4146 spi_idle_runtime_pm(ctlr);
4147 }
4148
4149out:
4150 mutex_unlock(&ctlr->io_mutex);
4151}
4152
4153/*-------------------------------------------------------------------------*/
4154
4155/*
4156 * Utility methods for SPI protocol drivers, layered on
4157 * top of the core. Some other utility methods are defined as
4158 * inline functions.
4159 */
4160
4161static void spi_complete(void *arg)
4162{
4163 complete(arg);
4164}
4165
4166static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4167{
4168 DECLARE_COMPLETION_ONSTACK(done);
4169 int status;
4170 struct spi_controller *ctlr = spi->controller;
4171
4172 status = __spi_validate(spi, message);
4173 if (status != 0)
4174 return status;
4175
4176 message->spi = spi;
4177
4178 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4179 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4180
4181 /*
4182 * Checking queue_empty here only guarantees async/sync message
4183 * ordering when coming from the same context. It does not need to
4184 * guard against reentrancy from a different context. The io_mutex
4185 * will catch those cases.
4186 */
4187 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4188 message->actual_length = 0;
4189 message->status = -EINPROGRESS;
4190
4191 trace_spi_message_submit(message);
4192
4193 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4194 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4195
4196 __spi_transfer_message_noqueue(ctlr, message);
4197
4198 return message->status;
4199 }
4200
4201 /*
4202 * There are messages in the async queue that could have originated
4203 * from the same context, so we need to preserve ordering.
4204	 * Therefore we send the message to the async queue and wait until it
4205	 * has completed.
4206 */
4207 message->complete = spi_complete;
4208 message->context = &done;
4209 status = spi_async_locked(spi, message);
4210 if (status == 0) {
4211 wait_for_completion(&done);
4212 status = message->status;
4213 }
4214 message->context = NULL;
4215
4216 return status;
4217}
4218
4219/**
4220 * spi_sync - blocking/synchronous SPI data transfers
4221 * @spi: device with which data will be exchanged
4222 * @message: describes the data transfers
4223 * Context: can sleep
4224 *
4225 * This call may only be used from a context that may sleep. The sleep
4226 * is non-interruptible, and has no timeout. Low-overhead controller
4227 * drivers may DMA directly into and out of the message buffers.
4228 *
4229 * Note that the SPI device's chip select is active during the message,
4230 * and then is normally disabled between messages. Drivers for some
4231 * frequently-used devices may want to minimize costs of selecting a chip,
4232 * by leaving it selected in anticipation that the next message will go
4233 * to the same chip. (That may increase power usage.)
4234 *
4235 * Also, the caller is guaranteeing that the memory associated with the
4236 * message will not be freed before this call returns.
4237 *
4238 * Return: zero on success, else a negative error code.
4239 */
4240int spi_sync(struct spi_device *spi, struct spi_message *message)
4241{
4242 int ret;
4243
4244 mutex_lock(&spi->controller->bus_lock_mutex);
4245 ret = __spi_sync(spi, message);
4246 mutex_unlock(&spi->controller->bus_lock_mutex);
4247
4248 return ret;
4249}
4250EXPORT_SYMBOL_GPL(spi_sync);
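
/*
 * A sketch of typical spi_sync() usage (illustrative only; "cmd" and
 * "resp" must be DMA-safe buffers, e.g. kmalloc'ed rather than on the
 * stack, and CMD_LEN/RESP_LEN are hypothetical sizes):
 *
 *	struct spi_transfer xfers[] = {
 *		{ .tx_buf = cmd, .len = CMD_LEN },
 *		{ .rx_buf = resp, .len = RESP_LEN },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	ret = spi_sync(spi, &msg);
 */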
4251
4252/**
4253 * spi_sync_locked - version of spi_sync with exclusive bus usage
4254 * @spi: device with which data will be exchanged
4255 * @message: describes the data transfers
4256 * Context: can sleep
4257 *
4258 * This call may only be used from a context that may sleep. The sleep
4259 * is non-interruptible, and has no timeout. Low-overhead controller
4260 * drivers may DMA directly into and out of the message buffers.
4261 *
4262 * This call should be used by drivers that require exclusive access to the
4263 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4264 * be released by a spi_bus_unlock call when the exclusive access is over.
4265 *
4266 * Return: zero on success, else a negative error code.
4267 */
4268int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4269{
4270 return __spi_sync(spi, message);
4271}
4272EXPORT_SYMBOL_GPL(spi_sync_locked);
4273
4274/**
4275 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4276 * @ctlr: SPI bus master that should be locked for exclusive bus access
4277 * Context: can sleep
4278 *
4279 * This call may only be used from a context that may sleep. The sleep
4280 * is non-interruptible, and has no timeout.
4281 *
4282 * This call should be used by drivers that require exclusive access to the
4283 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4284 * exclusive access is over. Data transfer must be done by spi_sync_locked
4285 * and spi_async_locked calls when the SPI bus lock is held.
4286 *
4287 * Return: always zero.
4288 */
4289int spi_bus_lock(struct spi_controller *ctlr)
4290{
4291 unsigned long flags;
4292
4293 mutex_lock(&ctlr->bus_lock_mutex);
4294
4295 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4296 ctlr->bus_lock_flag = 1;
4297 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4298
4299 /* Mutex remains locked until spi_bus_unlock() is called */
4300
4301 return 0;
4302}
4303EXPORT_SYMBOL_GPL(spi_bus_lock);
4304
4305/**
4306 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4307 * @ctlr: SPI bus master that was locked for exclusive bus access
4308 * Context: can sleep
4309 *
4310 * This call may only be used from a context that may sleep. The sleep
4311 * is non-interruptible, and has no timeout.
4312 *
4313 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4314 * call.
4315 *
4316 * Return: always zero.
4317 */
4318int spi_bus_unlock(struct spi_controller *ctlr)
4319{
4320 ctlr->bus_lock_flag = 0;
4321
4322 mutex_unlock(&ctlr->bus_lock_mutex);
4323
4324 return 0;
4325}
4326EXPORT_SYMBOL_GPL(spi_bus_unlock);
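
/*
 * A sketch of the bus-lock pattern implemented by spi_bus_lock(),
 * spi_sync_locked() and spi_bus_unlock() (illustrative only): a driver
 * that must issue two messages with no other bus traffic in between
 * takes the lock, uses the _locked variants, then releases the bus:
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */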
4327
4328/* Portable code must never pass more than 32 bytes */
4329#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
4330
4331static u8 *buf;
4332
4333/**
4334 * spi_write_then_read - SPI synchronous write followed by read
4335 * @spi: device with which data will be exchanged
4336 * @txbuf: data to be written (need not be dma-safe)
4337 * @n_tx: size of txbuf, in bytes
4338 * @rxbuf: buffer into which data will be read (need not be dma-safe)
4339 * @n_rx: size of rxbuf, in bytes
4340 * Context: can sleep
4341 *
4342 * This performs a half-duplex MicroWire-style transaction with the
4343 * device, sending txbuf and then reading rxbuf. The return value
4344 * is zero for success, else a negative errno status code.
4345 * This call may only be used from a context that may sleep.
4346 *
4347 * Parameters to this routine are always copied using a small buffer.
4348 * Performance-sensitive or bulk transfer code should instead use
4349 * spi_{async,sync}() calls with dma-safe buffers.
4350 *
4351 * Return: zero on success, else a negative error code.
4352 */
4353int spi_write_then_read(struct spi_device *spi,
4354 const void *txbuf, unsigned n_tx,
4355 void *rxbuf, unsigned n_rx)
4356{
4357 static DEFINE_MUTEX(lock);
4358
4359 int status;
4360 struct spi_message message;
4361 struct spi_transfer x[2];
4362 u8 *local_buf;
4363
4364 /*
4365 * Use preallocated DMA-safe buffer if we can. We can't avoid
4366	 * copying here (as a pure convenience), but we can
4367 * keep heap costs out of the hot path unless someone else is
4368 * using the pre-allocated buffer or the transfer is too large.
4369 */
4370 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4371 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4372 GFP_KERNEL | GFP_DMA);
4373 if (!local_buf)
4374 return -ENOMEM;
4375 } else {
4376 local_buf = buf;
4377 }
4378
4379 spi_message_init(&message);
4380 memset(x, 0, sizeof(x));
4381 if (n_tx) {
4382 x[0].len = n_tx;
4383 spi_message_add_tail(&x[0], &message);
4384 }
4385 if (n_rx) {
4386 x[1].len = n_rx;
4387 spi_message_add_tail(&x[1], &message);
4388 }
4389
4390 memcpy(local_buf, txbuf, n_tx);
4391 x[0].tx_buf = local_buf;
4392 x[1].rx_buf = local_buf + n_tx;
4393
4394 /* Do the i/o */
4395 status = spi_sync(spi, &message);
4396 if (status == 0)
4397 memcpy(rxbuf, x[1].rx_buf, n_rx);
4398
4399 if (x[0].tx_buf == buf)
4400 mutex_unlock(&lock);
4401 else
4402 kfree(local_buf);
4403
4404 return status;
4405}
4406EXPORT_SYMBOL_GPL(spi_write_then_read);
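
/*
 * A sketch of typical usage (illustrative only; reading a hypothetical
 * 3-byte chip ID after a one-byte command, with both buffers on the
 * stack, which this helper explicitly allows):
 *
 *	u8 cmd = 0x9f;
 *	u8 id[3];
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */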
4407
4408/*-------------------------------------------------------------------------*/
4409
4410#if IS_ENABLED(CONFIG_OF_DYNAMIC)
4411/* Must call put_device() when done with returned spi_device device */
4412static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4413{
4414 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4415
4416 return dev ? to_spi_device(dev) : NULL;
4417}
4418
4419/* SPI controllers are not on the spi_bus, so we have to find them another way */
4420static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4421{
4422 struct device *dev;
4423
4424 dev = class_find_device_by_of_node(&spi_master_class, node);
4425 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4426 dev = class_find_device_by_of_node(&spi_slave_class, node);
4427 if (!dev)
4428 return NULL;
4429
4430 /* Reference got in class_find_device */
4431 return container_of(dev, struct spi_controller, dev);
4432}
4433
4434static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4435 void *arg)
4436{
4437 struct of_reconfig_data *rd = arg;
4438 struct spi_controller *ctlr;
4439 struct spi_device *spi;
4440
4441 switch (of_reconfig_get_state_change(action, arg)) {
4442 case OF_RECONFIG_CHANGE_ADD:
4443 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4444 if (ctlr == NULL)
4445 return NOTIFY_OK; /* Not for us */
4446
4447 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4448 put_device(&ctlr->dev);
4449 return NOTIFY_OK;
4450 }
4451
4452 spi = of_register_spi_device(ctlr, rd->dn);
4453 put_device(&ctlr->dev);
4454
4455 if (IS_ERR(spi)) {
4456 pr_err("%s: failed to create for '%pOF'\n",
4457 __func__, rd->dn);
4458 of_node_clear_flag(rd->dn, OF_POPULATED);
4459 return notifier_from_errno(PTR_ERR(spi));
4460 }
4461 break;
4462
4463 case OF_RECONFIG_CHANGE_REMOVE:
4464 /* Already depopulated? */
4465 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4466 return NOTIFY_OK;
4467
4468 /* Find our device by node */
4469 spi = of_find_spi_device_by_node(rd->dn);
4470 if (spi == NULL)
4471 return NOTIFY_OK; /* No? not meant for us */
4472
4473 /* Unregister takes one ref away */
4474 spi_unregister_device(spi);
4475
4476 /* And put the reference of the find */
4477 put_device(&spi->dev);
4478 break;
4479 }
4480
4481 return NOTIFY_OK;
4482}
4483
4484static struct notifier_block spi_of_notifier = {
4485 .notifier_call = of_spi_notify,
4486};
4487#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4488extern struct notifier_block spi_of_notifier;
4489#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4490
4491#if IS_ENABLED(CONFIG_ACPI)
4492static int spi_acpi_controller_match(struct device *dev, const void *data)
4493{
4494 return ACPI_COMPANION(dev->parent) == data;
4495}
4496
4497static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4498{
4499 struct device *dev;
4500
4501 dev = class_find_device(&spi_master_class, NULL, adev,
4502 spi_acpi_controller_match);
4503 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4504 dev = class_find_device(&spi_slave_class, NULL, adev,
4505 spi_acpi_controller_match);
4506 if (!dev)
4507 return NULL;
4508
4509 return container_of(dev, struct spi_controller, dev);
4510}
4511
4512static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4513{
4514 struct device *dev;
4515
4516 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4517 return to_spi_device(dev);
4518}
4519
4520static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4521 void *arg)
4522{
4523 struct acpi_device *adev = arg;
4524 struct spi_controller *ctlr;
4525 struct spi_device *spi;
4526
4527 switch (value) {
4528 case ACPI_RECONFIG_DEVICE_ADD:
4529 ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4530 if (!ctlr)
4531 break;
4532
4533 acpi_register_spi_device(ctlr, adev);
4534 put_device(&ctlr->dev);
4535 break;
4536 case ACPI_RECONFIG_DEVICE_REMOVE:
4537 if (!acpi_device_enumerated(adev))
4538 break;
4539
4540 spi = acpi_spi_find_device_by_adev(adev);
4541 if (!spi)
4542 break;
4543
4544 spi_unregister_device(spi);
4545 put_device(&spi->dev);
4546 break;
4547 }
4548
4549 return NOTIFY_OK;
4550}
4551
4552static struct notifier_block spi_acpi_notifier = {
4553 .notifier_call = acpi_spi_notify,
4554};
4555#else
4556extern struct notifier_block spi_acpi_notifier;
4557#endif
4558
4559static int __init spi_init(void)
4560{
4561 int status;
4562
4563 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4564 if (!buf) {
4565 status = -ENOMEM;
4566 goto err0;
4567 }
4568
4569 status = bus_register(&spi_bus_type);
4570 if (status < 0)
4571 goto err1;
4572
4573 status = class_register(&spi_master_class);
4574 if (status < 0)
4575 goto err2;
4576
4577 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4578 status = class_register(&spi_slave_class);
4579 if (status < 0)
4580 goto err3;
4581 }
4582
4583 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4584 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4585 if (IS_ENABLED(CONFIG_ACPI))
4586 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4587
4588 return 0;
4589
4590err3:
4591 class_unregister(&spi_master_class);
4592err2:
4593 bus_unregister(&spi_bus_type);
4594err1:
4595 kfree(buf);
4596 buf = NULL;
4597err0:
4598 return status;
4599}
4600
4601/*
4602 * A board_info is normally registered in arch_initcall(),
4603 * but even essential drivers wait till later.
4604 *
4605 * REVISIT only boardinfo really needs static linking. The rest (device and
4606 * driver registration) _could_ be dynamically linked (modular) ... Costs
4607 * include needing to have boardinfo data structures be much more public.
4608 */
4609postcore_initcall(spi_init);
1// SPDX-License-Identifier: GPL-2.0-or-later
2// SPI init/core code
3//
4// Copyright (C) 2005 David Brownell
5// Copyright (C) 2008 Secret Lab Technologies Ltd.
6
7#include <linux/acpi.h>
8#include <linux/cache.h>
9#include <linux/clk/clk-conf.h>
10#include <linux/delay.h>
11#include <linux/device.h>
12#include <linux/dmaengine.h>
13#include <linux/dma-mapping.h>
14#include <linux/export.h>
15#include <linux/gpio/consumer.h>
16#include <linux/highmem.h>
17#include <linux/idr.h>
18#include <linux/init.h>
19#include <linux/ioport.h>
20#include <linux/kernel.h>
21#include <linux/kthread.h>
22#include <linux/mod_devicetable.h>
23#include <linux/mutex.h>
24#include <linux/of_device.h>
25#include <linux/of_irq.h>
26#include <linux/percpu.h>
27#include <linux/platform_data/x86/apple.h>
28#include <linux/pm_domain.h>
29#include <linux/pm_runtime.h>
30#include <linux/property.h>
31#include <linux/ptp_clock_kernel.h>
32#include <linux/sched/rt.h>
33#include <linux/slab.h>
34#include <linux/spi/spi.h>
35#include <linux/spi/spi-mem.h>
36#include <uapi/linux/sched/types.h>
37
38#define CREATE_TRACE_POINTS
39#include <trace/events/spi.h>
40EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
41EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
42
43#include "internals.h"
44
45static DEFINE_IDR(spi_master_idr);
46
47static void spidev_release(struct device *dev)
48{
49 struct spi_device *spi = to_spi_device(dev);
50
51 spi_controller_put(spi->controller);
52 kfree(spi->driver_override);
53 free_percpu(spi->pcpu_statistics);
54 kfree(spi);
55}
56
57static ssize_t
58modalias_show(struct device *dev, struct device_attribute *a, char *buf)
59{
60 const struct spi_device *spi = to_spi_device(dev);
61 int len;
62
63 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
64 if (len != -ENODEV)
65 return len;
66
67 return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
68}
69static DEVICE_ATTR_RO(modalias);
70
71static ssize_t driver_override_store(struct device *dev,
72 struct device_attribute *a,
73 const char *buf, size_t count)
74{
75 struct spi_device *spi = to_spi_device(dev);
76 int ret;
77
78 ret = driver_set_override(dev, &spi->driver_override, buf, count);
79 if (ret)
80 return ret;
81
82 return count;
83}
84
85static ssize_t driver_override_show(struct device *dev,
86 struct device_attribute *a, char *buf)
87{
88 const struct spi_device *spi = to_spi_device(dev);
89 ssize_t len;
90
91 device_lock(dev);
92 len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
93 device_unlock(dev);
94 return len;
95}
96static DEVICE_ATTR_RW(driver_override);
97
98static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
99{
100 struct spi_statistics __percpu *pcpu_stats;
101
102 if (dev)
103 pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
104 else
105 pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
106
107 if (pcpu_stats) {
108 int cpu;
109
110 for_each_possible_cpu(cpu) {
111 struct spi_statistics *stat;
112
113 stat = per_cpu_ptr(pcpu_stats, cpu);
114 u64_stats_init(&stat->syncp);
115 }
116 }
117 return pcpu_stats;
118}
119
120static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
121 char *buf, size_t offset)
122{
123 u64 val = 0;
124 int i;
125
126 for_each_possible_cpu(i) {
127 const struct spi_statistics *pcpu_stats;
128 u64_stats_t *field;
129 unsigned int start;
130 u64 inc;
131
132 pcpu_stats = per_cpu_ptr(stat, i);
133 field = (void *)pcpu_stats + offset;
134 do {
135 start = u64_stats_fetch_begin(&pcpu_stats->syncp);
136 inc = u64_stats_read(field);
137 } while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
138 val += inc;
139 }
140 return sysfs_emit(buf, "%llu\n", val);
141}
142
143#define SPI_STATISTICS_ATTRS(field, file) \
144static ssize_t spi_controller_##field##_show(struct device *dev, \
145 struct device_attribute *attr, \
146 char *buf) \
147{ \
148 struct spi_controller *ctlr = container_of(dev, \
149 struct spi_controller, dev); \
150 return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
151} \
152static struct device_attribute dev_attr_spi_controller_##field = { \
153 .attr = { .name = file, .mode = 0444 }, \
154 .show = spi_controller_##field##_show, \
155}; \
156static ssize_t spi_device_##field##_show(struct device *dev, \
157 struct device_attribute *attr, \
158 char *buf) \
159{ \
160 struct spi_device *spi = to_spi_device(dev); \
161 return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
162} \
163static struct device_attribute dev_attr_spi_device_##field = { \
164 .attr = { .name = file, .mode = 0444 }, \
165 .show = spi_device_##field##_show, \
166}
167
168#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
169static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
170 char *buf) \
171{ \
172 return spi_emit_pcpu_stats(stat, buf, \
173 offsetof(struct spi_statistics, field)); \
174} \
175SPI_STATISTICS_ATTRS(name, file)
176
177#define SPI_STATISTICS_SHOW(field) \
178 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
179 field)
180
181SPI_STATISTICS_SHOW(messages);
182SPI_STATISTICS_SHOW(transfers);
183SPI_STATISTICS_SHOW(errors);
184SPI_STATISTICS_SHOW(timedout);
185
186SPI_STATISTICS_SHOW(spi_sync);
187SPI_STATISTICS_SHOW(spi_sync_immediate);
188SPI_STATISTICS_SHOW(spi_async);
189
190SPI_STATISTICS_SHOW(bytes);
191SPI_STATISTICS_SHOW(bytes_rx);
192SPI_STATISTICS_SHOW(bytes_tx);
193
194#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
195 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
196 "transfer_bytes_histo_" number, \
197 transfer_bytes_histo[index])
198SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
199SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
200SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
201SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
202SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
203SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
204SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
205SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
206SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
207SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
208SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
209SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
210SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
211SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
212SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
213SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
214SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
215
216SPI_STATISTICS_SHOW(transfers_split_maxsize);
217
218static struct attribute *spi_dev_attrs[] = {
219 &dev_attr_modalias.attr,
220 &dev_attr_driver_override.attr,
221 NULL,
222};
223
224static const struct attribute_group spi_dev_group = {
225 .attrs = spi_dev_attrs,
226};
227
228static struct attribute *spi_device_statistics_attrs[] = {
229 &dev_attr_spi_device_messages.attr,
230 &dev_attr_spi_device_transfers.attr,
231 &dev_attr_spi_device_errors.attr,
232 &dev_attr_spi_device_timedout.attr,
233 &dev_attr_spi_device_spi_sync.attr,
234 &dev_attr_spi_device_spi_sync_immediate.attr,
235 &dev_attr_spi_device_spi_async.attr,
236 &dev_attr_spi_device_bytes.attr,
237 &dev_attr_spi_device_bytes_rx.attr,
238 &dev_attr_spi_device_bytes_tx.attr,
239 &dev_attr_spi_device_transfer_bytes_histo0.attr,
240 &dev_attr_spi_device_transfer_bytes_histo1.attr,
241 &dev_attr_spi_device_transfer_bytes_histo2.attr,
242 &dev_attr_spi_device_transfer_bytes_histo3.attr,
243 &dev_attr_spi_device_transfer_bytes_histo4.attr,
244 &dev_attr_spi_device_transfer_bytes_histo5.attr,
245 &dev_attr_spi_device_transfer_bytes_histo6.attr,
246 &dev_attr_spi_device_transfer_bytes_histo7.attr,
247 &dev_attr_spi_device_transfer_bytes_histo8.attr,
248 &dev_attr_spi_device_transfer_bytes_histo9.attr,
249 &dev_attr_spi_device_transfer_bytes_histo10.attr,
250 &dev_attr_spi_device_transfer_bytes_histo11.attr,
251 &dev_attr_spi_device_transfer_bytes_histo12.attr,
252 &dev_attr_spi_device_transfer_bytes_histo13.attr,
253 &dev_attr_spi_device_transfer_bytes_histo14.attr,
254 &dev_attr_spi_device_transfer_bytes_histo15.attr,
255 &dev_attr_spi_device_transfer_bytes_histo16.attr,
256 &dev_attr_spi_device_transfers_split_maxsize.attr,
257 NULL,
258};
259
260static const struct attribute_group spi_device_statistics_group = {
261 .name = "statistics",
262 .attrs = spi_device_statistics_attrs,
263};
264
265static const struct attribute_group *spi_dev_groups[] = {
266 &spi_dev_group,
267 &spi_device_statistics_group,
268 NULL,
269};
270
271static struct attribute *spi_controller_statistics_attrs[] = {
272 &dev_attr_spi_controller_messages.attr,
273 &dev_attr_spi_controller_transfers.attr,
274 &dev_attr_spi_controller_errors.attr,
275 &dev_attr_spi_controller_timedout.attr,
276 &dev_attr_spi_controller_spi_sync.attr,
277 &dev_attr_spi_controller_spi_sync_immediate.attr,
278 &dev_attr_spi_controller_spi_async.attr,
279 &dev_attr_spi_controller_bytes.attr,
280 &dev_attr_spi_controller_bytes_rx.attr,
281 &dev_attr_spi_controller_bytes_tx.attr,
282 &dev_attr_spi_controller_transfer_bytes_histo0.attr,
283 &dev_attr_spi_controller_transfer_bytes_histo1.attr,
284 &dev_attr_spi_controller_transfer_bytes_histo2.attr,
285 &dev_attr_spi_controller_transfer_bytes_histo3.attr,
286 &dev_attr_spi_controller_transfer_bytes_histo4.attr,
287 &dev_attr_spi_controller_transfer_bytes_histo5.attr,
288 &dev_attr_spi_controller_transfer_bytes_histo6.attr,
289 &dev_attr_spi_controller_transfer_bytes_histo7.attr,
290 &dev_attr_spi_controller_transfer_bytes_histo8.attr,
291 &dev_attr_spi_controller_transfer_bytes_histo9.attr,
292 &dev_attr_spi_controller_transfer_bytes_histo10.attr,
293 &dev_attr_spi_controller_transfer_bytes_histo11.attr,
294 &dev_attr_spi_controller_transfer_bytes_histo12.attr,
295 &dev_attr_spi_controller_transfer_bytes_histo13.attr,
296 &dev_attr_spi_controller_transfer_bytes_histo14.attr,
297 &dev_attr_spi_controller_transfer_bytes_histo15.attr,
298 &dev_attr_spi_controller_transfer_bytes_histo16.attr,
299 &dev_attr_spi_controller_transfers_split_maxsize.attr,
300 NULL,
301};
302
303static const struct attribute_group spi_controller_statistics_group = {
304 .name = "statistics",
305 .attrs = spi_controller_statistics_attrs,
306};
307
308static const struct attribute_group *spi_master_groups[] = {
309 &spi_controller_statistics_group,
310 NULL,
311};
312
313static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
314 struct spi_transfer *xfer,
315 struct spi_controller *ctlr)
316{
317 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
318 struct spi_statistics *stats;
319
320 if (l2len < 0)
321 l2len = 0;
322
323 get_cpu();
324 stats = this_cpu_ptr(pcpu_stats);
325 u64_stats_update_begin(&stats->syncp);
326
327 u64_stats_inc(&stats->transfers);
328 u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
329
330 u64_stats_add(&stats->bytes, xfer->len);
331 if ((xfer->tx_buf) &&
332 (xfer->tx_buf != ctlr->dummy_tx))
333 u64_stats_add(&stats->bytes_tx, xfer->len);
334 if ((xfer->rx_buf) &&
335 (xfer->rx_buf != ctlr->dummy_rx))
336 u64_stats_add(&stats->bytes_rx, xfer->len);
337
338 u64_stats_update_end(&stats->syncp);
339 put_cpu();
340}
341
342/*
343 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
344 * and the sysfs version makes coldplug work too.
345 */
346static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
347{
348 while (id->name[0]) {
349 if (!strcmp(name, id->name))
350 return id;
351 id++;
352 }
353 return NULL;
354}
355
356const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
357{
358 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
359
360 return spi_match_id(sdrv->id_table, sdev->modalias);
361}
362EXPORT_SYMBOL_GPL(spi_get_device_id);
363
364const void *spi_get_device_match_data(const struct spi_device *sdev)
365{
366 const void *match;
367
368 match = device_get_match_data(&sdev->dev);
369 if (match)
370 return match;
371
372 return (const void *)spi_get_device_id(sdev)->driver_data;
373}
374EXPORT_SYMBOL_GPL(spi_get_device_match_data);
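
/*
 * A sketch of typical probe-time usage (illustrative only; "my_probe"
 * and "struct mychip_info" are hypothetical driver names):
 *
 *	static int my_probe(struct spi_device *spi)
 *	{
 *		const struct mychip_info *info;
 *
 *		info = spi_get_device_match_data(spi);
 *		if (!info)
 *			return -ENODEV;
 *		...
 *	}
 */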
375
376static int spi_match_device(struct device *dev, struct device_driver *drv)
377{
378 const struct spi_device *spi = to_spi_device(dev);
379 const struct spi_driver *sdrv = to_spi_driver(drv);
380
381 /* Check override first, and if set, only use the named driver */
382 if (spi->driver_override)
383 return strcmp(spi->driver_override, drv->name) == 0;
384
385 /* Attempt an OF style match */
386 if (of_driver_match_device(dev, drv))
387 return 1;
388
389 /* Then try ACPI */
390 if (acpi_driver_match_device(dev, drv))
391 return 1;
392
393 if (sdrv->id_table)
394 return !!spi_match_id(sdrv->id_table, spi->modalias);
395
396 return strcmp(spi->modalias, drv->name) == 0;
397}
398
399static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
400{
401 const struct spi_device *spi = to_spi_device(dev);
402 int rc;
403
404 rc = acpi_device_uevent_modalias(dev, env);
405 if (rc != -ENODEV)
406 return rc;
407
408 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
409}
410
411static int spi_probe(struct device *dev)
412{
413 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
414 struct spi_device *spi = to_spi_device(dev);
415 int ret;
416
417 ret = of_clk_set_defaults(dev->of_node, false);
418 if (ret)
419 return ret;
420
421 if (dev->of_node) {
422 spi->irq = of_irq_get(dev->of_node, 0);
423 if (spi->irq == -EPROBE_DEFER)
424 return -EPROBE_DEFER;
425 if (spi->irq < 0)
426 spi->irq = 0;
427 }
428
429 ret = dev_pm_domain_attach(dev, true);
430 if (ret)
431 return ret;
432
433 if (sdrv->probe) {
434 ret = sdrv->probe(spi);
435 if (ret)
436 dev_pm_domain_detach(dev, true);
437 }
438
439 return ret;
440}
441
442static void spi_remove(struct device *dev)
443{
444 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
445
446 if (sdrv->remove)
447 sdrv->remove(to_spi_device(dev));
448
449 dev_pm_domain_detach(dev, true);
450}
451
452static void spi_shutdown(struct device *dev)
453{
454 if (dev->driver) {
455 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
456
457 if (sdrv->shutdown)
458 sdrv->shutdown(to_spi_device(dev));
459 }
460}
461
462struct bus_type spi_bus_type = {
463 .name = "spi",
464 .dev_groups = spi_dev_groups,
465 .match = spi_match_device,
466 .uevent = spi_uevent,
467 .probe = spi_probe,
468 .remove = spi_remove,
469 .shutdown = spi_shutdown,
470};
471EXPORT_SYMBOL_GPL(spi_bus_type);
472
473/**
474 * __spi_register_driver - register a SPI driver
475 * @owner: owner module of the driver to register
476 * @sdrv: the driver to register
477 * Context: can sleep
478 *
479 * Return: zero on success, else a negative error code.
480 */
481int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
482{
483 sdrv->driver.owner = owner;
484 sdrv->driver.bus = &spi_bus_type;
485
486 /*
487 * For Really Good Reasons we use spi: modaliases not of:
488 * modaliases for DT, so module autoloading won't work if we
489 * don't have a spi_device_id as well as a compatible string.
490 */
491 if (sdrv->driver.of_match_table) {
492 const struct of_device_id *of_id;
493
494 for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
495 of_id++) {
496 const char *of_name;
497
498 /* Strip off any vendor prefix */
499 of_name = strnchr(of_id->compatible,
500 sizeof(of_id->compatible), ',');
501 if (of_name)
502 of_name++;
503 else
504 of_name = of_id->compatible;
505
506 if (sdrv->id_table) {
507 const struct spi_device_id *spi_id;
508
509 spi_id = spi_match_id(sdrv->id_table, of_name);
510 if (spi_id)
511 continue;
512 } else {
513 if (strcmp(sdrv->driver.name, of_name) == 0)
514 continue;
515 }
516
517 pr_warn("SPI driver %s has no spi_device_id for %s\n",
518 sdrv->driver.name, of_id->compatible);
519 }
520 }
521
522 return driver_register(&sdrv->driver);
523}
524EXPORT_SYMBOL_GPL(__spi_register_driver);
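
/*
 * A sketch of how a driver normally registers itself, via the
 * module_spi_driver() convenience macro (illustrative only; the
 * "mychip" names and tables are hypothetical). Note that both an
 * id_table and an of_match_table are provided, as the warning above
 * encourages:
 *
 *	static const struct spi_device_id mychip_spi_ids[] = {
 *		{ "mychip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, mychip_spi_ids);
 *
 *	static struct spi_driver mychip_driver = {
 *		.driver = {
 *			.name = "mychip",
 *			.of_match_table = mychip_of_match,
 *		},
 *		.probe = mychip_probe,
 *		.id_table = mychip_spi_ids,
 *	};
 *	module_spi_driver(mychip_driver);
 */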
525
526/*-------------------------------------------------------------------------*/
527
528/*
529 * SPI devices should normally not be created by SPI device drivers; that
530 * would make them board-specific. Similarly with SPI controller drivers.
531 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
532 * with other readonly (flashable) information about mainboard devices.
533 */
534
535struct boardinfo {
536 struct list_head list;
537 struct spi_board_info board_info;
538};
539
540static LIST_HEAD(board_list);
541static LIST_HEAD(spi_controller_list);
542
543/*
544 * Used to protect add/del operations on the board_info and
545 * spi_controller lists and their matching process; also used
546 * to protect the object of type struct idr.
547 */
548static DEFINE_MUTEX(board_lock);
549
550/**
551 * spi_alloc_device - Allocate a new SPI device
552 * @ctlr: Controller to which device is connected
553 * Context: can sleep
554 *
555 * Allows a driver to allocate and initialize a spi_device without
556 * registering it immediately. This allows a driver to directly
557 * fill the spi_device with device parameters before calling
558 * spi_add_device() on it.
559 *
560 * Caller is responsible for calling spi_add_device() on the returned
561 * spi_device structure to add it to the SPI controller. If the caller
562 * needs to discard the spi_device without adding it, then it should
563 * call spi_dev_put() on it.
564 *
565 * Return: a pointer to the new device, or NULL.
566 */
567struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
568{
569 struct spi_device *spi;
570
571 if (!spi_controller_get(ctlr))
572 return NULL;
573
574 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
575 if (!spi) {
576 spi_controller_put(ctlr);
577 return NULL;
578 }
579
580 spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
581 if (!spi->pcpu_statistics) {
582 kfree(spi);
583 spi_controller_put(ctlr);
584 return NULL;
585 }
586
587 spi->master = spi->controller = ctlr;
588 spi->dev.parent = &ctlr->dev;
589 spi->dev.bus = &spi_bus_type;
590 spi->dev.release = spidev_release;
591 spi->mode = ctlr->buswidth_override_bits;
592
593 device_initialize(&spi->dev);
594 return spi;
595}
596EXPORT_SYMBOL_GPL(spi_alloc_device);
597
598static void spi_dev_set_name(struct spi_device *spi)
599{
600 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
601
602 if (adev) {
603 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
604 return;
605 }
606
607 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
608 spi_get_chipselect(spi, 0));
609}
610
611static int spi_dev_check(struct device *dev, void *data)
612{
613 struct spi_device *spi = to_spi_device(dev);
614 struct spi_device *new_spi = data;
615 int idx, nw_idx;
616 u8 cs, cs_nw;
617
618 if (spi->controller == new_spi->controller) {
619 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
620 cs = spi_get_chipselect(spi, idx);
621 for (nw_idx = 0; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
622 cs_nw = spi_get_chipselect(new_spi, nw_idx);
623 if (cs != 0xFF && cs_nw != 0xFF && cs == cs_nw) {
624 dev_err(dev, "chipselect %d already in use\n", cs_nw);
625 return -EBUSY;
626 }
627 }
628 }
629 }
630 return 0;
631}
632
633static void spi_cleanup(struct spi_device *spi)
634{
635 if (spi->controller->cleanup)
636 spi->controller->cleanup(spi);
637}
638
639static int __spi_add_device(struct spi_device *spi)
640{
641 struct spi_controller *ctlr = spi->controller;
642 struct device *dev = ctlr->dev.parent;
643 int status, idx, nw_idx;
644 u8 cs, nw_cs;
645
646 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
647 /* Chipselects are numbered 0..max; validate. */
648 cs = spi_get_chipselect(spi, idx);
649 if (cs != 0xFF && cs >= ctlr->num_chipselect) {
650 dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
651 ctlr->num_chipselect);
652 return -EINVAL;
653 }
654 }
655
656 /*
657	 * Make sure that multiple logical CSs don't map to the same physical CS.
658 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
659 */
660 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
661 cs = spi_get_chipselect(spi, idx);
662 for (nw_idx = idx + 1; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
663 nw_cs = spi_get_chipselect(spi, nw_idx);
664 if (cs != 0xFF && nw_cs != 0xFF && cs == nw_cs) {
665 dev_err(dev, "chipselect %d already in use\n", nw_cs);
666 return -EBUSY;
667 }
668 }
669 }
670
671 /* Set the bus ID string */
672 spi_dev_set_name(spi);
673
674 /*
675 * We need to make sure there's no other device with this
676 * chipselect **BEFORE** we call setup(), else we'll trash
677 * its configuration.
678 */
679 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
680 if (status)
681 return status;
682
683 /* Controller may unregister concurrently */
684 if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
685 !device_is_registered(&ctlr->dev)) {
686 return -ENODEV;
687 }
688
689 if (ctlr->cs_gpiods) {
690 u8 cs;
691
692 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
693 cs = spi_get_chipselect(spi, idx);
694 if (cs != 0xFF)
695 spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
696 }
697 }
698
699 /*
700 * Drivers may modify this initial i/o setup, but will
701	 * normally rely on the device being set up. Devices
702 * using SPI_CS_HIGH can't coexist well otherwise...
703 */
704 status = spi_setup(spi);
705 if (status < 0) {
706 dev_err(dev, "can't setup %s, status %d\n",
707 dev_name(&spi->dev), status);
708 return status;
709 }
710
711 /* Device may be bound to an active driver when this returns */
712 status = device_add(&spi->dev);
713 if (status < 0) {
714 dev_err(dev, "can't add %s, status %d\n",
715 dev_name(&spi->dev), status);
716 spi_cleanup(spi);
717 } else {
718 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
719 }
720
721 return status;
722}
723
724/**
725 * spi_add_device - Add spi_device allocated with spi_alloc_device
726 * @spi: spi_device to register
727 *
728 * Companion function to spi_alloc_device. Devices allocated with
729 * spi_alloc_device can be added onto the SPI bus with this function.
730 *
731 * Return: 0 on success; negative errno on failure
732 */
733int spi_add_device(struct spi_device *spi)
734{
735 struct spi_controller *ctlr = spi->controller;
736 int status;
737
738 /* Set the bus ID string */
739 spi_dev_set_name(spi);
740
741 mutex_lock(&ctlr->add_lock);
742 status = __spi_add_device(spi);
743 mutex_unlock(&ctlr->add_lock);
744 return status;
745}
746EXPORT_SYMBOL_GPL(spi_add_device);
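
/*
 * A sketch of the alloc/configure/add pattern (illustrative only; the
 * chip-select number, speed and modalias are hypothetical):
 *
 *	spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi_set_chipselect(spi, 0, 2);
 *	spi->max_speed_hz = 10000000;
 *	strscpy(spi->modalias, "mychip", sizeof(spi->modalias));
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */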
747
748/**
749 * spi_new_device - instantiate one new SPI device
750 * @ctlr: Controller to which device is connected
751 * @chip: Describes the SPI device
752 * Context: can sleep
753 *
754 * On typical mainboards, this is purely internal; and it's not needed
755 * after board init creates the hard-wired devices. Some development
756 * platforms may not be able to use spi_register_board_info though, and
757 * this is exported so that for example a USB or parport based adapter
758 * driver could add devices (which it would learn about out-of-band).
759 *
760 * Return: the new device, or NULL.
761 */
762struct spi_device *spi_new_device(struct spi_controller *ctlr,
763 struct spi_board_info *chip)
764{
765 struct spi_device *proxy;
766 int status;
767 u8 idx;
768
769 /*
770 * NOTE: caller did any chip->bus_num checks necessary.
771 *
772 * Also, unless we change the return value convention to use
773 * error-or-pointer (not NULL-or-pointer), troubleshootability
774 * suggests syslogged diagnostics are best here (ugh).
775 */
776
777 proxy = spi_alloc_device(ctlr);
778 if (!proxy)
779 return NULL;
780
781 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
782
783 /*
784	 * Zero (0) is a valid physical CS value and can be located at any
785	 * logical CS in spi->chip_select[]. If all physical CS values were
786	 * initialized to 0, it would be impossible to differentiate between
787	 * a valid physical CS 0 and an unused logical CS whose physical CS
788	 * happens to be 0. To avoid this, initialize every CS to 0xFF; all
789	 * unused logical CS entries then carry the 0xFF physical CS value
790	 * and can be ignored during physical CS validity checks.
791 */
792 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
793 spi_set_chipselect(proxy, idx, 0xFF);
794
795 spi_set_chipselect(proxy, 0, chip->chip_select);
796 proxy->max_speed_hz = chip->max_speed_hz;
797 proxy->mode = chip->mode;
798 proxy->irq = chip->irq;
799 strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
800 proxy->dev.platform_data = (void *) chip->platform_data;
801 proxy->controller_data = chip->controller_data;
802 proxy->controller_state = NULL;
803 /*
804	 * spi->chip_select[i] gives the physical CS for logical CS i; a
805	 * logical CS is selected by setting the ith bit in spi->cs_index_mask.
806	 * So, for example, if spi->cs_index_mask = 0x01, the logical CS number
807	 * is 0 and spi->chip_select[0] gives the physical CS.
808	 * By default spi->chip_select[0] holds the physical CS number, so set
809	 * spi->cs_index_mask to 0x01.
810 */
811 proxy->cs_index_mask = 0x01;
812
813 if (chip->swnode) {
814 status = device_add_software_node(&proxy->dev, chip->swnode);
815 if (status) {
816 dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
817 chip->modalias, status);
818 goto err_dev_put;
819 }
820 }
821
822 status = spi_add_device(proxy);
823 if (status < 0)
824 goto err_dev_put;
825
826 return proxy;
827
828err_dev_put:
829 device_remove_software_node(&proxy->dev);
830 spi_dev_put(proxy);
831 return NULL;
832}
833EXPORT_SYMBOL_GPL(spi_new_device);
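
/*
 * A sketch of how an adapter driver might instantiate a device it
 * learned about out-of-band (illustrative only; all field values are
 * hypothetical):
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "mychip",
 *		.max_speed_hz	= 5000000,
 *		.mode		= SPI_MODE_0,
 *		.chip_select	= 0,
 *	};
 *	struct spi_device *spi = spi_new_device(ctlr, &chip);
 */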
834
835/**
836 * spi_unregister_device - unregister a single SPI device
837 * @spi: spi_device to unregister
838 *
839 * Start making the passed SPI device vanish. Normally this would be handled
840 * by spi_unregister_controller().
841 */
842void spi_unregister_device(struct spi_device *spi)
843{
844 if (!spi)
845 return;
846
847 if (spi->dev.of_node) {
848 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
849 of_node_put(spi->dev.of_node);
850 }
851 if (ACPI_COMPANION(&spi->dev))
852 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
853 device_remove_software_node(&spi->dev);
854 device_del(&spi->dev);
855 spi_cleanup(spi);
856 put_device(&spi->dev);
857}
858EXPORT_SYMBOL_GPL(spi_unregister_device);
859
860static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
861 struct spi_board_info *bi)
862{
863 struct spi_device *dev;
864
865 if (ctlr->bus_num != bi->bus_num)
866 return;
867
868 dev = spi_new_device(ctlr, bi);
869 if (!dev)
870 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
871 bi->modalias);
872}
873
874/**
875 * spi_register_board_info - register SPI devices for a given board
876 * @info: array of chip descriptors
877 * @n: how many descriptors are provided
878 * Context: can sleep
879 *
880 * Board-specific early init code calls this (probably during arch_initcall)
881 * with segments of the SPI device table. Any device nodes are created later,
882 * after the relevant parent SPI controller (bus_num) is defined. We keep
883 * this table of devices forever, so that reloading a controller driver will
884 * not make Linux forget about these hard-wired devices.
885 *
886 * Other code can also call this, e.g. a particular add-on board might provide
887 * SPI devices through its expansion connector, so code initializing that board
888 * would naturally declare its SPI devices.
889 *
890 * The board info passed can safely be __initdata ... but be careful of
891 * any embedded pointers (platform_data, etc), they're copied as-is.
892 *
893 * Return: zero on success, else a negative error code.
894 */
895int spi_register_board_info(struct spi_board_info const *info, unsigned n)
896{
897 struct boardinfo *bi;
898 int i;
899
900 if (!n)
901 return 0;
902
903 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
904 if (!bi)
905 return -ENOMEM;
906
907 for (i = 0; i < n; i++, bi++, info++) {
908 struct spi_controller *ctlr;
909
910 memcpy(&bi->board_info, info, sizeof(*info));
911
912 mutex_lock(&board_lock);
913 list_add_tail(&bi->list, &board_list);
914 list_for_each_entry(ctlr, &spi_controller_list, list)
915 spi_match_controller_to_boardinfo(ctlr,
916 &bi->board_info);
917 mutex_unlock(&board_lock);
918 }
919
920 return 0;
921}
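
/*
 * A sketch of typical board-init usage (illustrative only; the bus
 * number, chip select and speed are hypothetical, and the table may
 * safely be __initdata as noted above):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "mychip",
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 2000000,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */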
922
923/*-------------------------------------------------------------------------*/
924
925/* Core methods for SPI resource management */
926
927/**
928 * spi_res_alloc - allocate a spi resource that is life-cycle managed
929 * during the processing of a spi_message while using
930 * spi_transfer_one
931 * @spi: the SPI device for which we allocate memory
932 * @release: the release code to execute for this resource
933 * @size: size to alloc and return
934 * @gfp: GFP allocation flags
935 *
936 * Return: the pointer to the allocated data
937 *
938 * This may get enhanced in the future to allocate from a memory pool
939 * of the @spi_device or @spi_controller to avoid repeated allocations.
940 */
941static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
942 size_t size, gfp_t gfp)
943{
944 struct spi_res *sres;
945
946 sres = kzalloc(sizeof(*sres) + size, gfp);
947 if (!sres)
948 return NULL;
949
950 INIT_LIST_HEAD(&sres->entry);
951 sres->release = release;
952
953 return sres->data;
954}
955
956/**
957 * spi_res_free - free an SPI resource
958 * @res: pointer to the custom data of a resource
959 */
960static void spi_res_free(void *res)
961{
962 struct spi_res *sres = container_of(res, struct spi_res, data);
963
964 if (!res)
965 return;
966
967 WARN_ON(!list_empty(&sres->entry));
968 kfree(sres);
969}
970
971/**
972 * spi_res_add - add a spi_res to the spi_message
973 * @message: the SPI message
974 * @res: the spi_resource
975 */
976static void spi_res_add(struct spi_message *message, void *res)
977{
978 struct spi_res *sres = container_of(res, struct spi_res, data);
979
980 WARN_ON(!list_empty(&sres->entry));
981 list_add_tail(&sres->entry, &message->resources);
982}
983
984/**
985 * spi_res_release - release all SPI resources for this message
986 * @ctlr: the @spi_controller
987 * @message: the @spi_message
988 */
989static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
990{
991 struct spi_res *res, *tmp;
992
993 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
994 if (res->release)
995 res->release(ctlr, message, res->data);
996
997 list_del(&res->entry);
998
999 kfree(res);
1000 }
1001}
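
/*
 * A sketch of the spi_res life cycle (illustrative only; "my_release"
 * is a hypothetical spi_res_release_t callback):
 *
 *	static void my_release(struct spi_controller *ctlr,
 *			       struct spi_message *msg, void *res)
 *	{
 *		// undo whatever the resource represents; the memory
 *		// itself is freed by spi_res_release()
 *	}
 *
 *	ptr = spi_res_alloc(spi, my_release, size, GFP_KERNEL);
 *	if (ptr)
 *		spi_res_add(msg, ptr);
 *	// later, spi_res_release(ctlr, msg) invokes my_release() and
 *	// frees ptr for every resource added to msg
 */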
1002
1003/*-------------------------------------------------------------------------*/
1004static inline bool spi_is_last_cs(struct spi_device *spi)
1005{
1006 u8 idx;
1007 bool last = false;
1008
1009 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
1010 if ((spi->cs_index_mask >> idx) & 0x01) {
1011 if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
1012 last = true;
1013 }
1014 }
1015 return last;
1016}
1017
1019static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
1020{
1021 bool activate = enable;
1022 u8 idx;
1023
1024 /*
1025 * Avoid calling into the driver (or doing delays) if the chip select
1026 * isn't actually changing from the last time this was called.
1027 */
1028 if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1029 spi_is_last_cs(spi)) ||
1030 (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1031 !spi_is_last_cs(spi))) &&
1032 (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
1033 return;
1034
1035 trace_spi_set_cs(spi, activate);
1036
1037 spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, idx) : -1;
1040 spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
1041
1042 if (spi->mode & SPI_CS_HIGH)
1043 enable = !enable;
1044
1045 if (spi_is_csgpiod(spi)) {
1046 if (!spi->controller->set_cs_timing && !activate)
1047 spi_delay_exec(&spi->cs_hold, NULL);
1048
1049 if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has had no means of expressing the
			 * GPIO polarity and thus the SPISerialBus() resource
			 * defines it on a per-chip basis. In order to avoid a
			 * chain of negations, the GPIO polarity is considered
			 * to be Active High. Even for the cases when _DSD() is
			 * involved (in the updated versions of ACPI) the GPIO
			 * CS polarity must be defined Active High to avoid
			 * ambiguity. That's why we use enable, which takes
			 * SPI_CS_HIGH into account.
			 */
1060 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
1061 if (((spi->cs_index_mask >> idx) & 0x01) &&
1062 spi_get_csgpiod(spi, idx)) {
1063 if (has_acpi_companion(&spi->dev))
1064 gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
1065 !enable);
1066 else
1067 /* Polarity handled by GPIO library */
1068 gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
1069 activate);
1070
1071 if (activate)
1072 spi_delay_exec(&spi->cs_setup, NULL);
1073 else
1074 spi_delay_exec(&spi->cs_inactive, NULL);
1075 }
1076 }
1077 }
1078 /* Some SPI masters need both GPIO CS & slave_select */
1079 if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1080 spi->controller->set_cs)
1081 spi->controller->set_cs(spi, !enable);
1082
1083 if (!spi->controller->set_cs_timing) {
1084 if (activate)
1085 spi_delay_exec(&spi->cs_setup, NULL);
1086 else
1087 spi_delay_exec(&spi->cs_inactive, NULL);
1088 }
1089 } else if (spi->controller->set_cs) {
1090 spi->controller->set_cs(spi, !enable);
1091 }
1092}
1093
1094#ifdef CONFIG_HAS_DMA
1095static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
1096 struct sg_table *sgt, void *buf, size_t len,
1097 enum dma_data_direction dir, unsigned long attrs)
1098{
1099 const bool vmalloced_buf = is_vmalloc_addr(buf);
1100 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1101#ifdef CONFIG_HIGHMEM
1102 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
1103 (unsigned long)buf < (PKMAP_BASE +
1104 (LAST_PKMAP * PAGE_SIZE)));
1105#else
1106 const bool kmap_buf = false;
1107#endif
1108 int desc_len;
1109 int sgs;
1110 struct page *vm_page;
1111 struct scatterlist *sg;
1112 void *sg_buf;
1113 size_t min;
1114 int i, ret;
1115
1116 if (vmalloced_buf || kmap_buf) {
1117 desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
1118 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
1119 } else if (virt_addr_valid(buf)) {
1120 desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1121 sgs = DIV_ROUND_UP(len, desc_len);
1122 } else {
1123 return -EINVAL;
1124 }
1125
1126 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
1127 if (ret != 0)
1128 return ret;
1129
1130 sg = &sgt->sgl[0];
1131 for (i = 0; i < sgs; i++) {
1132
1133 if (vmalloced_buf || kmap_buf) {
1134 /*
1135 * Next scatterlist entry size is the minimum between
1136 * the desc_len and the remaining buffer length that
1137 * fits in a page.
1138 */
1139 min = min_t(size_t, desc_len,
1140 min_t(size_t, len,
1141 PAGE_SIZE - offset_in_page(buf)));
1142 if (vmalloced_buf)
1143 vm_page = vmalloc_to_page(buf);
1144 else
1145 vm_page = kmap_to_page(buf);
1146 if (!vm_page) {
1147 sg_free_table(sgt);
1148 return -ENOMEM;
1149 }
1150 sg_set_page(sg, vm_page,
1151 min, offset_in_page(buf));
1152 } else {
1153 min = min_t(size_t, len, desc_len);
1154 sg_buf = buf;
1155 sg_set_buf(sg, sg_buf, min);
1156 }
1157
1158 buf += min;
1159 len -= min;
1160 sg = sg_next(sg);
1161 }
1162
1163 ret = dma_map_sgtable(dev, sgt, dir, attrs);
1164 if (ret < 0) {
1165 sg_free_table(sgt);
1166 return ret;
1167 }
1168
1169 return 0;
1170}
1171
1172int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
1173 struct sg_table *sgt, void *buf, size_t len,
1174 enum dma_data_direction dir)
1175{
1176 return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1177}
1178
1179static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
1180 struct device *dev, struct sg_table *sgt,
1181 enum dma_data_direction dir,
1182 unsigned long attrs)
1183{
1184 if (sgt->orig_nents) {
1185 dma_unmap_sgtable(dev, sgt, dir, attrs);
1186 sg_free_table(sgt);
1187 sgt->orig_nents = 0;
1188 sgt->nents = 0;
1189 }
1190}
1191
1192void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1193 struct sg_table *sgt, enum dma_data_direction dir)
1194{
1195 spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1196}
1197
1198static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1199{
1200 struct device *tx_dev, *rx_dev;
1201 struct spi_transfer *xfer;
1202 int ret;
1203
1204 if (!ctlr->can_dma)
1205 return 0;
1206
1207 if (ctlr->dma_tx)
1208 tx_dev = ctlr->dma_tx->device->dev;
1209 else if (ctlr->dma_map_dev)
1210 tx_dev = ctlr->dma_map_dev;
1211 else
1212 tx_dev = ctlr->dev.parent;
1213
1214 if (ctlr->dma_rx)
1215 rx_dev = ctlr->dma_rx->device->dev;
1216 else if (ctlr->dma_map_dev)
1217 rx_dev = ctlr->dma_map_dev;
1218 else
1219 rx_dev = ctlr->dev.parent;
1220
1221 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1222 /* The sync is done before each transfer. */
1223 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1224
1225 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1226 continue;
1227
1228 if (xfer->tx_buf != NULL) {
1229 ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1230 (void *)xfer->tx_buf,
1231 xfer->len, DMA_TO_DEVICE,
1232 attrs);
1233 if (ret != 0)
1234 return ret;
1235 }
1236
1237 if (xfer->rx_buf != NULL) {
1238 ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1239 xfer->rx_buf, xfer->len,
1240 DMA_FROM_DEVICE, attrs);
1241 if (ret != 0) {
1242 spi_unmap_buf_attrs(ctlr, tx_dev,
1243 &xfer->tx_sg, DMA_TO_DEVICE,
1244 attrs);
1245
1246 return ret;
1247 }
1248 }
1249 }
1250
1251 ctlr->cur_rx_dma_dev = rx_dev;
1252 ctlr->cur_tx_dma_dev = tx_dev;
1253 ctlr->cur_msg_mapped = true;
1254
1255 return 0;
1256}
1257
1258static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1259{
1260 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1261 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1262 struct spi_transfer *xfer;
1263
1264 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
1265 return 0;
1266
1267 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1268 /* The sync has already been done after each transfer. */
1269 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1270
1271 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1272 continue;
1273
1274 spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1275 DMA_FROM_DEVICE, attrs);
1276 spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1277 DMA_TO_DEVICE, attrs);
1278 }
1279
1280 ctlr->cur_msg_mapped = false;
1281
1282 return 0;
1283}
1284
1285static void spi_dma_sync_for_device(struct spi_controller *ctlr,
1286 struct spi_transfer *xfer)
1287{
1288 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1289 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1290
1291 if (!ctlr->cur_msg_mapped)
1292 return;
1293
1294 if (xfer->tx_sg.orig_nents)
1295 dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1296 if (xfer->rx_sg.orig_nents)
1297 dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1298}
1299
1300static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
1301 struct spi_transfer *xfer)
1302{
1303 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1304 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1305
1306 if (!ctlr->cur_msg_mapped)
1307 return;
1308
1309 if (xfer->rx_sg.orig_nents)
1310 dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1311 if (xfer->tx_sg.orig_nents)
1312 dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1313}
1314#else /* !CONFIG_HAS_DMA */
1315static inline int __spi_map_msg(struct spi_controller *ctlr,
1316 struct spi_message *msg)
1317{
1318 return 0;
1319}
1320
1321static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1322 struct spi_message *msg)
1323{
1324 return 0;
1325}
1326
1327static void spi_dma_sync_for_device(struct spi_controller *ctrl,
1328 struct spi_transfer *xfer)
1329{
1330}
1331
1332static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
1333 struct spi_transfer *xfer)
1334{
1335}
1336#endif /* !CONFIG_HAS_DMA */
1337
1338static inline int spi_unmap_msg(struct spi_controller *ctlr,
1339 struct spi_message *msg)
1340{
1341 struct spi_transfer *xfer;
1342
1343 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore tx_buf and rx_buf to NULL if they were replaced
		 * with the controller's dummy buffers by spi_map_msg().
		 */
1348 if (xfer->tx_buf == ctlr->dummy_tx)
1349 xfer->tx_buf = NULL;
1350 if (xfer->rx_buf == ctlr->dummy_rx)
1351 xfer->rx_buf = NULL;
1352 }
1353
1354 return __spi_unmap_msg(ctlr, msg);
1355}
1356
1357static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1358{
1359 struct spi_transfer *xfer;
1360 void *tmp;
1361 unsigned int max_tx, max_rx;
1362
1363 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1364 && !(msg->spi->mode & SPI_3WIRE)) {
1365 max_tx = 0;
1366 max_rx = 0;
1367
1368 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1369 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1370 !xfer->tx_buf)
1371 max_tx = max(xfer->len, max_tx);
1372 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1373 !xfer->rx_buf)
1374 max_rx = max(xfer->len, max_rx);
1375 }
1376
1377 if (max_tx) {
1378 tmp = krealloc(ctlr->dummy_tx, max_tx,
1379 GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1380 if (!tmp)
1381 return -ENOMEM;
1382 ctlr->dummy_tx = tmp;
1383 }
1384
1385 if (max_rx) {
1386 tmp = krealloc(ctlr->dummy_rx, max_rx,
1387 GFP_KERNEL | GFP_DMA);
1388 if (!tmp)
1389 return -ENOMEM;
1390 ctlr->dummy_rx = tmp;
1391 }
1392
1393 if (max_tx || max_rx) {
1394 list_for_each_entry(xfer, &msg->transfers,
1395 transfer_list) {
1396 if (!xfer->len)
1397 continue;
1398 if (!xfer->tx_buf)
1399 xfer->tx_buf = ctlr->dummy_tx;
1400 if (!xfer->rx_buf)
1401 xfer->rx_buf = ctlr->dummy_rx;
1402 }
1403 }
1404 }
1405
1406 return __spi_map_msg(ctlr, msg);
1407}
1408
1409static int spi_transfer_wait(struct spi_controller *ctlr,
1410 struct spi_message *msg,
1411 struct spi_transfer *xfer)
1412{
1413 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1414 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1415 u32 speed_hz = xfer->speed_hz;
1416 unsigned long long ms;
1417
1418 if (spi_controller_is_slave(ctlr)) {
1419 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1420 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1421 return -EINTR;
1422 }
1423 } else {
1424 if (!speed_hz)
1425 speed_hz = 100000;
1426
		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * apply the MSEC_PER_SEC multiplier before the division,
		 * otherwise we may get 0 for short transfers.
		 */
1433 ms = 8LL * MSEC_PER_SEC * xfer->len;
1434 do_div(ms, speed_hz);
1435
		/*
		 * Double it and add 200 ms of tolerance; clamp to the
		 * predefined maximum in case of overflow.
		 */
1440 ms += ms + 200;
1441 if (ms > UINT_MAX)
1442 ms = UINT_MAX;
1443
1444 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1445 msecs_to_jiffies(ms));
1446
1447 if (ms == 0) {
1448 SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1449 SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1450 dev_err(&msg->spi->dev,
1451 "SPI transfer timed out\n");
1452 return -ETIMEDOUT;
1453 }
1454
1455 if (xfer->error & SPI_TRANS_FAIL_IO)
1456 return -EIO;
1457 }
1458
1459 return 0;
1460}
1461
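/*
 * Worked example of the timeout computed above (illustrative numbers):
 * a 100-byte transfer at 1 MHz gives ms = 8 * 1000 * 100 / 1000000 = 0
 * after the integer division, so the doubling plus the 200 ms tolerance
 * still leaves a usable 200 ms timeout for short transfers.
 */
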
1462static void _spi_transfer_delay_ns(u32 ns)
1463{
1464 if (!ns)
1465 return;
1466 if (ns <= NSEC_PER_USEC) {
1467 ndelay(ns);
1468 } else {
1469 u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1470
1471 if (us <= 10)
1472 udelay(us);
1473 else
1474 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1475 }
1476}
1477
1478int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1479{
1480 u32 delay = _delay->value;
1481 u32 unit = _delay->unit;
1482 u32 hz;
1483
1484 if (!delay)
1485 return 0;
1486
1487 switch (unit) {
1488 case SPI_DELAY_UNIT_USECS:
1489 delay *= NSEC_PER_USEC;
1490 break;
1491 case SPI_DELAY_UNIT_NSECS:
1492 /* Nothing to do here */
1493 break;
1494 case SPI_DELAY_UNIT_SCK:
1495 /* Clock cycles need to be obtained from spi_transfer */
1496 if (!xfer)
1497 return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it
		 * by underestimating with half of the requested Hz.
		 */
1502 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1503 if (!hz)
1504 return -EINVAL;
1505
1506 /* Convert delay to nanoseconds */
1507 delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1508 break;
1509 default:
1510 return -EINVAL;
1511 }
1512
1513 return delay;
1514}
1515EXPORT_SYMBOL_GPL(spi_delay_to_ns);
1516
1517int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1518{
1519 int delay;
1520
1521 might_sleep();
1522
1523 if (!_delay)
1524 return -EINVAL;
1525
1526 delay = spi_delay_to_ns(_delay, xfer);
1527 if (delay < 0)
1528 return delay;
1529
1530 _spi_transfer_delay_ns(delay);
1531
1532 return 0;
1533}
1534EXPORT_SYMBOL_GPL(spi_delay_exec);
1535
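/*
 * Illustrative use of the two delay helpers above from a driver (the
 * values and surrounding context are hypothetical): a delay of two SCK
 * cycles is scaled to the transfer's effective clock rate.
 *
 *	struct spi_delay d = {
 *		.value = 2,
 *		.unit = SPI_DELAY_UNIT_SCK,
 *	};
 *	int ns = spi_delay_to_ns(&d, xfer);	// e.g. 2000 ns at 1 MHz
 *
 *	if (ns >= 0)
 *		spi_delay_exec(&d, xfer);	// ndelay()/udelay()/sleep as fits
 */
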
1536static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1537 struct spi_transfer *xfer)
1538{
1539 u32 default_delay_ns = 10 * NSEC_PER_USEC;
1540 u32 delay = xfer->cs_change_delay.value;
1541 u32 unit = xfer->cs_change_delay.unit;
1542 int ret;
1543
	/* A zero delay means "fast" mode: return early for every unit but USECS */
1545 if (!delay) {
1546 if (unit == SPI_DELAY_UNIT_USECS)
1547 _spi_transfer_delay_ns(default_delay_ns);
1548 return;
1549 }
1550
1551 ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1552 if (ret) {
1553 dev_err_once(&msg->spi->dev,
1554 "Use of unsupported delay unit %i, using default of %luus\n",
1555 unit, default_delay_ns / NSEC_PER_USEC);
1556 _spi_transfer_delay_ns(default_delay_ns);
1557 }
1558}
1559
1560void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
1561 struct spi_transfer *xfer)
1562{
1563 _spi_transfer_cs_change_delay(msg, xfer);
1564}
1565EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
1566
1567/*
1568 * spi_transfer_one_message - Default implementation of transfer_one_message()
1569 *
1570 * This is a standard implementation of transfer_one_message() for
1571 * drivers which implement a transfer_one() operation. It provides
1572 * standard handling of delays and chip select management.
1573 */
1574static int spi_transfer_one_message(struct spi_controller *ctlr,
1575 struct spi_message *msg)
1576{
1577 struct spi_transfer *xfer;
1578 bool keep_cs = false;
1579 int ret = 0;
1580 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1581 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1582
1583 xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1584 spi_set_cs(msg->spi, !xfer->cs_off, false);
1585
1586 SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1587 SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1588
1589 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1590 trace_spi_transfer_start(msg, xfer);
1591
1592 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1593 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1594
1595 if (!ctlr->ptp_sts_supported) {
1596 xfer->ptp_sts_word_pre = 0;
1597 ptp_read_system_prets(xfer->ptp_sts);
1598 }
1599
1600 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1601 reinit_completion(&ctlr->xfer_completion);
1602
1603fallback_pio:
1604 spi_dma_sync_for_device(ctlr, xfer);
1605 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1606 if (ret < 0) {
1607 spi_dma_sync_for_cpu(ctlr, xfer);
1608
1609 if (ctlr->cur_msg_mapped &&
1610 (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1611 __spi_unmap_msg(ctlr, msg);
1612 ctlr->fallback = true;
1613 xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1614 goto fallback_pio;
1615 }
1616
1617 SPI_STATISTICS_INCREMENT_FIELD(statm,
1618 errors);
1619 SPI_STATISTICS_INCREMENT_FIELD(stats,
1620 errors);
1621 dev_err(&msg->spi->dev,
1622 "SPI transfer failed: %d\n", ret);
1623 goto out;
1624 }
1625
1626 if (ret > 0) {
1627 ret = spi_transfer_wait(ctlr, msg, xfer);
1628 if (ret < 0)
1629 msg->status = ret;
1630 }
1631
1632 spi_dma_sync_for_cpu(ctlr, xfer);
1633 } else {
1634 if (xfer->len)
1635 dev_err(&msg->spi->dev,
1636 "Bufferless transfer has length %u\n",
1637 xfer->len);
1638 }
1639
1640 if (!ctlr->ptp_sts_supported) {
1641 ptp_read_system_postts(xfer->ptp_sts);
1642 xfer->ptp_sts_word_post = xfer->len;
1643 }
1644
1645 trace_spi_transfer_stop(msg, xfer);
1646
1647 if (msg->status != -EINPROGRESS)
1648 goto out;
1649
1650 spi_transfer_delay_exec(xfer);
1651
1652 if (xfer->cs_change) {
1653 if (list_is_last(&xfer->transfer_list,
1654 &msg->transfers)) {
1655 keep_cs = true;
1656 } else {
1657 if (!xfer->cs_off)
1658 spi_set_cs(msg->spi, false, false);
1659 _spi_transfer_cs_change_delay(msg, xfer);
1660 if (!list_next_entry(xfer, transfer_list)->cs_off)
1661 spi_set_cs(msg->spi, true, false);
1662 }
1663 } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1664 xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1665 spi_set_cs(msg->spi, xfer->cs_off, false);
1666 }
1667
1668 msg->actual_length += xfer->len;
1669 }
1670
1671out:
1672 if (ret != 0 || !keep_cs)
1673 spi_set_cs(msg->spi, false, false);
1674
1675 if (msg->status == -EINPROGRESS)
1676 msg->status = ret;
1677
1678 if (msg->status && ctlr->handle_err)
1679 ctlr->handle_err(ctlr, msg);
1680
1681 spi_finalize_current_message(ctlr);
1682
1683 return ret;
1684}
1685
1686/**
1687 * spi_finalize_current_transfer - report completion of a transfer
1688 * @ctlr: the controller reporting completion
1689 *
1690 * Called by SPI drivers using the core transfer_one_message()
1691 * implementation to notify it that the current interrupt driven
1692 * transfer has finished and the next one may be scheduled.
1693 */
1694void spi_finalize_current_transfer(struct spi_controller *ctlr)
1695{
1696 complete(&ctlr->xfer_completion);
1697}
1698EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1699
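/*
 * Sketch of the typical call site in an interrupt-driven driver (names
 * are hypothetical): the "transfer done" IRQ handler completes a transfer
 * that ->transfer_one() started by returning a positive value.
 *
 *	static irqreturn_t example_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		// ... read and acknowledge the hardware status ...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */
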
1700static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1701{
1702 if (ctlr->auto_runtime_pm) {
1703 pm_runtime_mark_last_busy(ctlr->dev.parent);
1704 pm_runtime_put_autosuspend(ctlr->dev.parent);
1705 }
1706}
1707
1708static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1709 struct spi_message *msg, bool was_busy)
1710{
1711 struct spi_transfer *xfer;
1712 int ret;
1713
1714 if (!was_busy && ctlr->auto_runtime_pm) {
1715 ret = pm_runtime_get_sync(ctlr->dev.parent);
1716 if (ret < 0) {
1717 pm_runtime_put_noidle(ctlr->dev.parent);
1718 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1719 ret);
1720
1721 msg->status = ret;
1722 spi_finalize_current_message(ctlr);
1723
1724 return ret;
1725 }
1726 }
1727
1728 if (!was_busy)
1729 trace_spi_controller_busy(ctlr);
1730
1731 if (!was_busy && ctlr->prepare_transfer_hardware) {
1732 ret = ctlr->prepare_transfer_hardware(ctlr);
1733 if (ret) {
1734 dev_err(&ctlr->dev,
1735 "failed to prepare transfer hardware: %d\n",
1736 ret);
1737
1738 if (ctlr->auto_runtime_pm)
1739 pm_runtime_put(ctlr->dev.parent);
1740
1741 msg->status = ret;
1742 spi_finalize_current_message(ctlr);
1743
1744 return ret;
1745 }
1746 }
1747
1748 trace_spi_message_start(msg);
1749
1750 ret = spi_split_transfers_maxsize(ctlr, msg,
1751 spi_max_transfer_size(msg->spi),
1752 GFP_KERNEL | GFP_DMA);
1753 if (ret) {
1754 msg->status = ret;
1755 spi_finalize_current_message(ctlr);
1756 return ret;
1757 }
1758
1759 if (ctlr->prepare_message) {
1760 ret = ctlr->prepare_message(ctlr, msg);
1761 if (ret) {
1762 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1763 ret);
1764 msg->status = ret;
1765 spi_finalize_current_message(ctlr);
1766 return ret;
1767 }
1768 msg->prepared = true;
1769 }
1770
1771 ret = spi_map_msg(ctlr, msg);
1772 if (ret) {
1773 msg->status = ret;
1774 spi_finalize_current_message(ctlr);
1775 return ret;
1776 }
1777
1778 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1779 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1780 xfer->ptp_sts_word_pre = 0;
1781 ptp_read_system_prets(xfer->ptp_sts);
1782 }
1783 }
1784
	/*
	 * A driver's implementation of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers will
	 * do this in the calling context, but some don't. For those cases, a
	 * completion is used to guarantee that this function does not return
	 * until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * The following two flags allow us to opportunistically skip the use
	 * of the completion, since waiting on it involves expensive spin
	 * locks. In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
1798 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1799 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1800 reinit_completion(&ctlr->cur_msg_completion);
1801 smp_wmb(); /* Make these available to spi_finalize_current_message() */
1802
1803 ret = ctlr->transfer_one_message(ctlr, msg);
1804 if (ret) {
1805 dev_err(&ctlr->dev,
1806 "failed to transfer one message from queue\n");
1807 return ret;
1808 }
1809
1810 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1811 smp_mb(); /* See spi_finalize_current_message()... */
1812 if (READ_ONCE(ctlr->cur_msg_incomplete))
1813 wait_for_completion(&ctlr->cur_msg_completion);
1814
1815 return 0;
1816}
1817
1818/**
1819 * __spi_pump_messages - function which processes SPI message queue
1820 * @ctlr: controller to process queue for
1821 * @in_kthread: true if we are in the context of the message pump thread
1822 *
1823 * This function checks if there is any SPI message in the queue that
1824 * needs processing and if so call out to the driver to initialize hardware
1825 * and transfer each message.
1826 *
1827 * Note that it is called both from the kthread itself and also from
1828 * inside spi_sync(); the queue extraction handling at the top of the
1829 * function should deal with this safely.
1830 */
1831static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1832{
1833 struct spi_message *msg;
1834 bool was_busy = false;
1835 unsigned long flags;
1836 int ret;
1837
1838 /* Take the I/O mutex */
1839 mutex_lock(&ctlr->io_mutex);
1840
1841 /* Lock queue */
1842 spin_lock_irqsave(&ctlr->queue_lock, flags);
1843
1844 /* Make sure we are not already running a message */
1845 if (ctlr->cur_msg)
1846 goto out_unlock;
1847
1848 /* Check if the queue is idle */
1849 if (list_empty(&ctlr->queue) || !ctlr->running) {
1850 if (!ctlr->busy)
1851 goto out_unlock;
1852
1853 /* Defer any non-atomic teardown to the thread */
1854 if (!in_kthread) {
1855 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1856 !ctlr->unprepare_transfer_hardware) {
1857 spi_idle_runtime_pm(ctlr);
1858 ctlr->busy = false;
1859 ctlr->queue_empty = true;
1860 trace_spi_controller_idle(ctlr);
1861 } else {
1862 kthread_queue_work(ctlr->kworker,
1863 &ctlr->pump_messages);
1864 }
1865 goto out_unlock;
1866 }
1867
1868 ctlr->busy = false;
1869 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1870
1871 kfree(ctlr->dummy_rx);
1872 ctlr->dummy_rx = NULL;
1873 kfree(ctlr->dummy_tx);
1874 ctlr->dummy_tx = NULL;
1875 if (ctlr->unprepare_transfer_hardware &&
1876 ctlr->unprepare_transfer_hardware(ctlr))
1877 dev_err(&ctlr->dev,
1878 "failed to unprepare transfer hardware\n");
1879 spi_idle_runtime_pm(ctlr);
1880 trace_spi_controller_idle(ctlr);
1881
1882 spin_lock_irqsave(&ctlr->queue_lock, flags);
1883 ctlr->queue_empty = true;
1884 goto out_unlock;
1885 }
1886
1887 /* Extract head of queue */
1888 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1889 ctlr->cur_msg = msg;
1890
1891 list_del_init(&msg->queue);
1892 if (ctlr->busy)
1893 was_busy = true;
1894 else
1895 ctlr->busy = true;
1896 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1897
1898 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1899 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1900
1901 ctlr->cur_msg = NULL;
1902 ctlr->fallback = false;
1903
1904 mutex_unlock(&ctlr->io_mutex);
1905
1906 /* Prod the scheduler in case transfer_one() was busy waiting */
1907 if (!ret)
1908 cond_resched();
1909 return;
1910
1911out_unlock:
1912 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1913 mutex_unlock(&ctlr->io_mutex);
1914}
1915
1916/**
1917 * spi_pump_messages - kthread work function which processes spi message queue
1918 * @work: pointer to kthread work struct contained in the controller struct
1919 */
1920static void spi_pump_messages(struct kthread_work *work)
1921{
1922 struct spi_controller *ctlr =
1923 container_of(work, struct spi_controller, pump_messages);
1924
1925 __spi_pump_messages(ctlr, true);
1926}
1927
1928/**
1929 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1930 * @ctlr: Pointer to the spi_controller structure of the driver
1931 * @xfer: Pointer to the transfer being timestamped
1932 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, the call must be followed up with
 *	      spi_take_timestamp_post() or otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
1939 *
1940 * This is a helper for drivers to collect the beginning of the TX timestamp
1941 * for the requested byte from the SPI transfer. The frequency with which this
1942 * function must be called (once per word, once for the whole transfer, once
 * per batch of words, etc.) is arbitrary as long as the @tx buffer offset is
1944 * greater than or equal to the requested byte at the time of the call. The
1945 * timestamp is only taken once, at the first such call. It is assumed that
1946 * the driver advances its @tx buffer pointer monotonically.
1947 */
1948void spi_take_timestamp_pre(struct spi_controller *ctlr,
1949 struct spi_transfer *xfer,
1950 size_t progress, bool irqs_off)
1951{
1952 if (!xfer->ptp_sts)
1953 return;
1954
1955 if (xfer->timestamped)
1956 return;
1957
1958 if (progress > xfer->ptp_sts_word_pre)
1959 return;
1960
1961 /* Capture the resolution of the timestamp */
1962 xfer->ptp_sts_word_pre = progress;
1963
1964 if (irqs_off) {
1965 local_irq_save(ctlr->irq_flags);
1966 preempt_disable();
1967 }
1968
1969 ptp_read_system_prets(xfer->ptp_sts);
1970}
1971EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1972
1973/**
1974 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1975 * @ctlr: Pointer to the spi_controller structure of the driver
1976 * @xfer: Pointer to the transfer being timestamped
1977 * @progress: How many words (not bytes) have been transferred so far
1978 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1979 *
1980 * This is a helper for drivers to collect the end of the TX timestamp for
1981 * the requested byte from the SPI transfer. Can be called with an arbitrary
1982 * frequency: only the first call where @tx exceeds or is equal to the
1983 * requested word will be timestamped.
1984 */
1985void spi_take_timestamp_post(struct spi_controller *ctlr,
1986 struct spi_transfer *xfer,
1987 size_t progress, bool irqs_off)
1988{
1989 if (!xfer->ptp_sts)
1990 return;
1991
1992 if (xfer->timestamped)
1993 return;
1994
1995 if (progress < xfer->ptp_sts_word_post)
1996 return;
1997
1998 ptp_read_system_postts(xfer->ptp_sts);
1999
2000 if (irqs_off) {
2001 local_irq_restore(ctlr->irq_flags);
2002 preempt_enable();
2003 }
2004
2005 /* Capture the resolution of the timestamp */
2006 xfer->ptp_sts_word_post = progress;
2007
2008 xfer->timestamped = 1;
2009}
2010EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
2011
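/*
 * Sketch of a PIO TX loop using the two timestamping helpers above (the
 * driver-side names are hypothetical). The loop counter counts words, so
 * it can be passed directly as @progress:
 *
 *	for (i = 0; i < num_words; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
 *		example_write_word_to_fifo(tx[i]);
 *		spi_take_timestamp_post(ctlr, xfer, i, irqs_off);
 *	}
 *
 * Only the calls bracketing the word requested via ptp_sts_word_pre/post
 * actually record a timestamp; the rest return early.
 */
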
2012/**
2013 * spi_set_thread_rt - set the controller to pump at realtime priority
2014 * @ctlr: controller to boost priority of
2015 *
2016 * This can be called because the controller requested realtime priority
2017 * (by setting the ->rt value before calling spi_register_controller()) or
2018 * because a device on the bus said that its transfers needed realtime
2019 * priority.
2020 *
2021 * NOTE: at the moment if any device on a bus says it needs realtime then
2022 * the thread will be at realtime priority for all transfers on that
2023 * controller. If this eventually becomes a problem we may see if we can
2024 * find a way to boost the priority only temporarily during relevant
2025 * transfers.
2026 */
2027static void spi_set_thread_rt(struct spi_controller *ctlr)
2028{
2029 dev_info(&ctlr->dev,
2030 "will run message pump with realtime priority\n");
2031 sched_set_fifo(ctlr->kworker->task);
2032}
2033
2034static int spi_init_queue(struct spi_controller *ctlr)
2035{
2036 ctlr->running = false;
2037 ctlr->busy = false;
2038 ctlr->queue_empty = true;
2039
2040 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
2041 if (IS_ERR(ctlr->kworker)) {
2042 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2043 return PTR_ERR(ctlr->kworker);
2044 }
2045
2046 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2047
2048 /*
2049 * Controller config will indicate if this controller should run the
2050 * message pump with high (realtime) priority to reduce the transfer
2051 * latency on the bus by minimising the delay between a transfer
2052 * request and the scheduling of the message pump thread. Without this
2053 * setting the message pump thread will remain at default priority.
2054 */
2055 if (ctlr->rt)
2056 spi_set_thread_rt(ctlr);
2057
2058 return 0;
2059}
2060
2061/**
2062 * spi_get_next_queued_message() - called by driver to check for queued
2063 * messages
2064 * @ctlr: the controller to check for queued messages
2065 *
2066 * If there are more messages in the queue, the next message is returned from
2067 * this call.
2068 *
2069 * Return: the next message in the queue, else NULL if the queue is empty.
2070 */
2071struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2072{
2073 struct spi_message *next;
2074 unsigned long flags;
2075
2076 /* Get a pointer to the next message, if any */
2077 spin_lock_irqsave(&ctlr->queue_lock, flags);
2078 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2079 queue);
2080 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2081
2082 return next;
2083}
2084EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
2085
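/*
 * Sketch for drivers that run their own message pump (the helper name is
 * hypothetical): peek at the head of the queue without dequeuing it.
 *
 *	struct spi_message *next = spi_get_next_queued_message(ctlr);
 *
 *	if (next)
 *		example_prepare_hw_for(next->spi);
 */
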
2086/**
2087 * spi_finalize_current_message() - the current message is complete
2088 * @ctlr: the controller to return the message to
2089 *
2090 * Called by the driver to notify the core that the message in the front of the
2091 * queue is complete and can be removed from the queue.
2092 */
2093void spi_finalize_current_message(struct spi_controller *ctlr)
2094{
2095 struct spi_transfer *xfer;
2096 struct spi_message *mesg;
2097 int ret;
2098
2099 mesg = ctlr->cur_msg;
2100
2101 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2102 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2103 ptp_read_system_postts(xfer->ptp_sts);
2104 xfer->ptp_sts_word_post = xfer->len;
2105 }
2106 }
2107
2108 if (unlikely(ctlr->ptp_sts_supported))
2109 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2110 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2111
2112 spi_unmap_msg(ctlr, mesg);
2113
	/*
	 * In the prepare_message callback the SPI bus driver has the
	 * opportunity to split a transfer into smaller chunks.
	 *
	 * Release the split transfers here since spi_map_msg() is done on
	 * the split transfers.
	 */
2121 spi_res_release(ctlr, mesg);
2122
2123 if (mesg->prepared && ctlr->unprepare_message) {
2124 ret = ctlr->unprepare_message(ctlr, mesg);
2125 if (ret) {
2126 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2127 ret);
2128 }
2129 }
2130
2131 mesg->prepared = false;
2132
2133 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2134 smp_mb(); /* See __spi_pump_transfer_message()... */
2135 if (READ_ONCE(ctlr->cur_msg_need_completion))
2136 complete(&ctlr->cur_msg_completion);
2137
2138 trace_spi_message_done(mesg);
2139
2140 mesg->state = NULL;
2141 if (mesg->complete)
2142 mesg->complete(mesg->context);
2143}
2144EXPORT_SYMBOL_GPL(spi_finalize_current_message);
2145
2146static int spi_start_queue(struct spi_controller *ctlr)
2147{
2148 unsigned long flags;
2149
2150 spin_lock_irqsave(&ctlr->queue_lock, flags);
2151
2152 if (ctlr->running || ctlr->busy) {
2153 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2154 return -EBUSY;
2155 }
2156
2157 ctlr->running = true;
2158 ctlr->cur_msg = NULL;
2159 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2160
2161 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2162
2163 return 0;
2164}
2165
2166static int spi_stop_queue(struct spi_controller *ctlr)
2167{
2168 unsigned long flags;
2169 unsigned limit = 500;
2170 int ret = 0;
2171
2172 spin_lock_irqsave(&ctlr->queue_lock, flags);
2173
2174 /*
2175 * This is a bit lame, but is optimized for the common execution path.
2176 * A wait_queue on the ctlr->busy could be used, but then the common
2177 * execution path (pump_messages) would be required to call wake_up or
2178 * friends on every SPI message. Do this instead.
2179 */
2180 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2181 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2182 usleep_range(10000, 11000);
2183 spin_lock_irqsave(&ctlr->queue_lock, flags);
2184 }
2185
2186 if (!list_empty(&ctlr->queue) || ctlr->busy)
2187 ret = -EBUSY;
2188 else
2189 ctlr->running = false;
2190
2191 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2192
2193 return ret;
2194}
2195
2196static int spi_destroy_queue(struct spi_controller *ctlr)
2197{
2198 int ret;
2199
2200 ret = spi_stop_queue(ctlr);
2201
	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to flush or stop the thread, so
	 * just return the error.
	 */
2208 if (ret) {
2209 dev_err(&ctlr->dev, "problem destroying queue\n");
2210 return ret;
2211 }
2212
2213 kthread_destroy_worker(ctlr->kworker);
2214
2215 return 0;
2216}
2217
2218static int __spi_queued_transfer(struct spi_device *spi,
2219 struct spi_message *msg,
2220 bool need_pump)
2221{
2222 struct spi_controller *ctlr = spi->controller;
2223 unsigned long flags;
2224
2225 spin_lock_irqsave(&ctlr->queue_lock, flags);
2226
2227 if (!ctlr->running) {
2228 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2229 return -ESHUTDOWN;
2230 }
2231 msg->actual_length = 0;
2232 msg->status = -EINPROGRESS;
2233
2234 list_add_tail(&msg->queue, &ctlr->queue);
2235 ctlr->queue_empty = false;
2236 if (!ctlr->busy && need_pump)
2237 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2238
2239 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2240 return 0;
2241}
2242
2243/**
2244 * spi_queued_transfer - transfer function for queued transfers
2245 * @spi: SPI device which is requesting transfer
 * @msg: SPI message to be handled and queued onto the driver queue
2247 *
2248 * Return: zero on success, else a negative error code.
2249 */
2250static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2251{
2252 return __spi_queued_transfer(spi, msg, true);
2253}
2254
2255static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2256{
2257 int ret;
2258
2259 ctlr->transfer = spi_queued_transfer;
2260 if (!ctlr->transfer_one_message)
2261 ctlr->transfer_one_message = spi_transfer_one_message;
2262
2263 /* Initialize and start queue */
2264 ret = spi_init_queue(ctlr);
2265 if (ret) {
2266 dev_err(&ctlr->dev, "problem initializing queue\n");
2267 goto err_init_queue;
2268 }
2269 ctlr->queued = true;
2270 ret = spi_start_queue(ctlr);
2271 if (ret) {
2272 dev_err(&ctlr->dev, "problem starting queue\n");
2273 goto err_start_queue;
2274 }
2275
2276 return 0;
2277
2278err_start_queue:
2279 spi_destroy_queue(ctlr);
2280err_init_queue:
2281 return ret;
2282}
2283
2284/**
 * spi_flush_queue - Send all pending messages in the queue from the caller's
 *		     context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. It is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
2293 */
2294void spi_flush_queue(struct spi_controller *ctlr)
2295{
2296 if (ctlr->transfer == spi_queued_transfer)
2297 __spi_pump_messages(ctlr, false);
2298}
2299
2300/*-------------------------------------------------------------------------*/
2301
2302#if defined(CONFIG_OF)
2303static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2304 struct spi_delay *delay, const char *prop)
2305{
2306 u32 value;
2307
2308 if (!of_property_read_u32(nc, prop, &value)) {
2309 if (value > U16_MAX) {
2310 delay->value = DIV_ROUND_UP(value, 1000);
2311 delay->unit = SPI_DELAY_UNIT_USECS;
2312 } else {
2313 delay->value = value;
2314 delay->unit = SPI_DELAY_UNIT_NSECS;
2315 }
2316 }
2317}
2318
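/*
 * Example of the conversion above (illustrative values): a property value
 * of 150000 ns exceeds U16_MAX and is therefore stored as 150 us
 * (DIV_ROUND_UP(150000, 1000)) with SPI_DELAY_UNIT_USECS, while 500 ns
 * fits and is stored verbatim with SPI_DELAY_UNIT_NSECS.
 */
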
2319static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2320 struct device_node *nc)
2321{
2322 u32 value, cs[SPI_CS_CNT_MAX];
2323 int rc, idx;
2324
2325 /* Mode (clock phase/polarity/etc.) */
2326 if (of_property_read_bool(nc, "spi-cpha"))
2327 spi->mode |= SPI_CPHA;
2328 if (of_property_read_bool(nc, "spi-cpol"))
2329 spi->mode |= SPI_CPOL;
2330 if (of_property_read_bool(nc, "spi-3wire"))
2331 spi->mode |= SPI_3WIRE;
2332 if (of_property_read_bool(nc, "spi-lsb-first"))
2333 spi->mode |= SPI_LSB_FIRST;
2334 if (of_property_read_bool(nc, "spi-cs-high"))
2335 spi->mode |= SPI_CS_HIGH;
2336
2337 /* Device DUAL/QUAD mode */
2338 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2339 switch (value) {
2340 case 0:
2341 spi->mode |= SPI_NO_TX;
2342 break;
2343 case 1:
2344 break;
2345 case 2:
2346 spi->mode |= SPI_TX_DUAL;
2347 break;
2348 case 4:
2349 spi->mode |= SPI_TX_QUAD;
2350 break;
2351 case 8:
2352 spi->mode |= SPI_TX_OCTAL;
2353 break;
2354 default:
2355 dev_warn(&ctlr->dev,
2356 "spi-tx-bus-width %d not supported\n",
2357 value);
2358 break;
2359 }
2360 }
2361
2362 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2363 switch (value) {
2364 case 0:
2365 spi->mode |= SPI_NO_RX;
2366 break;
2367 case 1:
2368 break;
2369 case 2:
2370 spi->mode |= SPI_RX_DUAL;
2371 break;
2372 case 4:
2373 spi->mode |= SPI_RX_QUAD;
2374 break;
2375 case 8:
2376 spi->mode |= SPI_RX_OCTAL;
2377 break;
2378 default:
2379 dev_warn(&ctlr->dev,
2380 "spi-rx-bus-width %d not supported\n",
2381 value);
2382 break;
2383 }
2384 }
2385
2386 if (spi_controller_is_slave(ctlr)) {
2387 if (!of_node_name_eq(nc, "slave")) {
2388 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2389 nc);
2390 return -EINVAL;
2391 }
2392 return 0;
2393 }
2394
2395 if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
		dev_err(&ctlr->dev, "Number of chip selects exceeds the maximum supported\n");
2397 return -EINVAL;
2398 }
2399
	/*
	 * Zero(0) is a valid physical CS value and can be located at any
	 * logical CS in the spi->chip_select[]. If all the physical CS
	 * were initialized to 0 it would be difficult to differentiate
	 * between a valid physical CS 0 and an unused logical CS whose
	 * physical CS can be 0. As a solution, initialize all the CS to
	 * 0xFF: all the unused logical CS will then have a 0xFF physical CS
	 * value and can be ignored while performing physical CS validity
	 * checks.
	 */
2409 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
2410 spi_set_chipselect(spi, idx, 0xFF);
2411
2412 /* Device address */
2413 rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2414 SPI_CS_CNT_MAX);
2415 if (rc < 0) {
2416 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2417 nc, rc);
2418 return rc;
2419 }
2420 if (rc > ctlr->num_chipselect) {
2421 dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2422 nc, rc);
2423 return rc;
2424 }
2425 if ((of_property_read_bool(nc, "parallel-memories")) &&
2426 (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2427 dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2428 return -EINVAL;
2429 }
2430 for (idx = 0; idx < rc; idx++)
2431 spi_set_chipselect(spi, idx, cs[idx]);
2432
	/*
	 * spi->chip_select[i] gives the corresponding physical CS for logical
	 * CS i. A logical CS is represented by setting the ith bit in
	 * spi->cs_index_mask. So, for example, if spi->cs_index_mask = 0x01
	 * then the logical CS number is 0 and spi->chip_select[0] gives the
	 * physical CS.
	 * By default spi->chip_select[0] holds the physical CS number, so set
	 * spi->cs_index_mask to 0x01.
	 */
2441 spi->cs_index_mask = 0x01;
2442
2443 /* Device speed */
2444 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2445 spi->max_speed_hz = value;
2446
2447 /* Device CS delays */
2448 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2449 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2450 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2451
2452 return 0;
2453}
2454
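/*
 * Example device tree node covered by the parsing above (an illustrative
 * slave device; the compatible string and values are only examples):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <50000000>;
 *		spi-cpol;
 *		spi-cpha;
 *		spi-tx-bus-width = <4>;
 *		spi-rx-bus-width = <4>;
 *		spi-cs-setup-delay-ns = <100>;
 *	};
 */
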
2455static struct spi_device *
2456of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2457{
2458 struct spi_device *spi;
2459 int rc;
2460
2461 /* Alloc an spi_device */
2462 spi = spi_alloc_device(ctlr);
2463 if (!spi) {
2464 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2465 rc = -ENOMEM;
2466 goto err_out;
2467 }
2468
2469 /* Select device driver */
2470 rc = of_alias_from_compatible(nc, spi->modalias,
2471 sizeof(spi->modalias));
2472 if (rc < 0) {
2473 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2474 goto err_out;
2475 }
2476
2477 rc = of_spi_parse_dt(ctlr, spi, nc);
2478 if (rc)
2479 goto err_out;
2480
2481 /* Store a pointer to the node in the device structure */
2482 of_node_get(nc);
2483
2484 device_set_node(&spi->dev, of_fwnode_handle(nc));
2485
2486 /* Register the new device */
2487 rc = spi_add_device(spi);
2488 if (rc) {
2489 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2490 goto err_of_node_put;
2491 }
2492
2493 return spi;
2494
2495err_of_node_put:
2496 of_node_put(nc);
2497err_out:
2498 spi_dev_put(spi);
2499 return ERR_PTR(rc);
2500}
2501
2502/**
2503 * of_register_spi_devices() - Register child devices onto the SPI bus
2504 * @ctlr: Pointer to spi_controller device
2505 *
 * Registers an spi_device for each child node of the controller node that
 * represents a valid SPI slave.
2508 */
2509static void of_register_spi_devices(struct spi_controller *ctlr)
2510{
2511 struct spi_device *spi;
2512 struct device_node *nc;
2513
2514 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2515 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2516 continue;
2517 spi = of_register_spi_device(ctlr, nc);
2518 if (IS_ERR(spi)) {
2519 dev_warn(&ctlr->dev,
2520 "Failed to create SPI device for %pOF\n", nc);
2521 of_node_clear_flag(nc, OF_POPULATED);
2522 }
2523 }
2524}
2525#else
2526static void of_register_spi_devices(struct spi_controller *ctlr) { }
2527#endif
2528
2529/**
2530 * spi_new_ancillary_device() - Register ancillary SPI device
2531 * @spi: Pointer to the main SPI device registering the ancillary device
2532 * @chip_select: Chip Select of the ancillary device
2533 *
2534 * Register an ancillary SPI device; for example some chips have a chip-select
2535 * for normal device usage and another one for setup/firmware upload.
2536 *
 * This may only be called from the main SPI device's probe routine.
2538 *
2539 * Return: 0 on success; negative errno on failure
2540 */
2541struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2542 u8 chip_select)
2543{
2544 struct spi_controller *ctlr = spi->controller;
2545 struct spi_device *ancillary;
2546 int rc = 0;
2547 u8 idx;
2548
2549 /* Alloc an spi_device */
2550 ancillary = spi_alloc_device(ctlr);
2551 if (!ancillary) {
2552 rc = -ENOMEM;
2553 goto err_out;
2554 }
2555
2556 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2557
	/*
	 * Zero(0) is a valid physical CS value and can be located at any
	 * logical CS in the spi->chip_select[]. If all the physical CS
	 * were initialized to 0 it would be difficult to differentiate
	 * between a valid physical CS 0 and an unused logical CS whose
	 * physical CS can be 0. As a solution, initialize all the CS to
	 * 0xFF: all the unused logical CS will then have a 0xFF physical CS
	 * value and can be ignored while performing physical CS validity
	 * checks.
	 */
2567 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
2568 spi_set_chipselect(ancillary, idx, 0xFF);
2569
2570 /* Use provided chip-select for ancillary device */
2571 spi_set_chipselect(ancillary, 0, chip_select);
2572
2573 /* Take over SPI mode/speed from SPI main device */
2574 ancillary->max_speed_hz = spi->max_speed_hz;
2575 ancillary->mode = spi->mode;
	/*
	 * spi->chip_select[i] gives the corresponding physical CS for logical
	 * CS i. A logical CS is represented by setting the ith bit in
	 * spi->cs_index_mask. So, for example, if spi->cs_index_mask = 0x01
	 * then the logical CS number is 0 and spi->chip_select[0] gives the
	 * physical CS.
	 * By default spi->chip_select[0] holds the physical CS number, so set
	 * spi->cs_index_mask to 0x01.
	 */
2584 ancillary->cs_index_mask = 0x01;
2585
2586 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2587
2588 /* Register the new device */
2589 rc = __spi_add_device(ancillary);
2590 if (rc) {
2591 dev_err(&spi->dev, "failed to register ancillary device\n");
2592 goto err_out;
2593 }
2594
2595 return ancillary;
2596
2597err_out:
2598 spi_dev_put(ancillary);
2599 return ERR_PTR(rc);
2600}
2601EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
2602
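/*
 * Sketched use from a main device's probe routine (the driver and the
 * chip select number below are hypothetical):
 *
 *	static int example_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *aux;
 *
 *		// Second chip select used for setup/firmware upload
 *		aux = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(aux))
 *			return PTR_ERR(aux);
 *		...
 *	}
 */
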
2603#ifdef CONFIG_ACPI
2604struct acpi_spi_lookup {
2605 struct spi_controller *ctlr;
2606 u32 max_speed_hz;
2607 u32 mode;
2608 int irq;
2609 u8 bits_per_word;
2610 u8 chip_select;
2611 int n;
2612 int index;
2613};
2614
2615static int acpi_spi_count(struct acpi_resource *ares, void *data)
2616{
2617 struct acpi_resource_spi_serialbus *sb;
2618 int *count = data;
2619
2620 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2621 return 1;
2622
2623 sb = &ares->data.spi_serial_bus;
2624 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2625 return 1;
2626
2627 *count = *count + 1;
2628
2629 return 1;
2630}
2631
2632/**
2633 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2634 * @adev: ACPI device
2635 *
 * Return: the number of SpiSerialBus resources in the ACPI device's
 * resource list, or a negative error code.
2638 */
2639int acpi_spi_count_resources(struct acpi_device *adev)
2640{
2641 LIST_HEAD(r);
2642 int count = 0;
2643 int ret;
2644
2645 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2646 if (ret < 0)
2647 return ret;
2648
2649 acpi_dev_free_resource_list(&r);
2650
2651 return count;
2652}
2653EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2654
2655static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2656 struct acpi_spi_lookup *lookup)
2657{
2658 const union acpi_object *obj;
2659
2660 if (!x86_apple_machine)
2661 return;
2662
2663 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2664 && obj->buffer.length >= 4)
2665 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2666
2667 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2668 && obj->buffer.length == 8)
2669 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2670
2671 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2672 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2673 lookup->mode |= SPI_LSB_FIRST;
2674
2675 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2676 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2677 lookup->mode |= SPI_CPOL;
2678
2679 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2680 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2681 lookup->mode |= SPI_CPHA;
2682}
2683
2684static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2685{
2686 struct acpi_spi_lookup *lookup = data;
2687 struct spi_controller *ctlr = lookup->ctlr;
2688
2689 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2690 struct acpi_resource_spi_serialbus *sb;
2691 acpi_handle parent_handle;
2692 acpi_status status;
2693
2694 sb = &ares->data.spi_serial_bus;
2695 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2696
2697 if (lookup->index != -1 && lookup->n++ != lookup->index)
2698 return 1;
2699
2700 status = acpi_get_handle(NULL,
2701 sb->resource_source.string_ptr,
2702 &parent_handle);
2703
2704 if (ACPI_FAILURE(status))
2705 return -ENODEV;
2706
2707 if (ctlr) {
2708 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2709 return -ENODEV;
2710 } else {
2711 struct acpi_device *adev;
2712
2713 adev = acpi_fetch_acpi_dev(parent_handle);
2714 if (!adev)
2715 return -ENODEV;
2716
2717 ctlr = acpi_spi_find_controller_by_adev(adev);
2718 if (!ctlr)
2719 return -EPROBE_DEFER;
2720
2721 lookup->ctlr = ctlr;
2722 }
2723
2724 /*
2725 * ACPI DeviceSelection numbering is handled by the
2726 * host controller driver in Windows and can vary
2727 * from driver to driver. In Linux we always expect
2728 * 0 .. max - 1 so we need to ask the driver to
2729 * translate between the two schemes.
2730 */
2731 if (ctlr->fw_translate_cs) {
2732 int cs = ctlr->fw_translate_cs(ctlr,
2733 sb->device_selection);
2734 if (cs < 0)
2735 return cs;
2736 lookup->chip_select = cs;
2737 } else {
2738 lookup->chip_select = sb->device_selection;
2739 }
2740
2741 lookup->max_speed_hz = sb->connection_speed;
2742 lookup->bits_per_word = sb->data_bit_length;
2743
2744 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2745 lookup->mode |= SPI_CPHA;
2746 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2747 lookup->mode |= SPI_CPOL;
2748 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2749 lookup->mode |= SPI_CS_HIGH;
2750 }
2751 } else if (lookup->irq < 0) {
2752 struct resource r;
2753
2754 if (acpi_dev_resource_interrupt(ares, 0, &r))
2755 lookup->irq = r.start;
2756 }
2757
2758 /* Always tell the ACPI core to skip this resource */
2759 return 1;
2760}
2761
2762/**
2763 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2764 * @ctlr: controller to which the spi device belongs
2765 * @adev: ACPI Device for the spi device
2766 * @index: Index of the spi resource inside the ACPI Node
2767 *
 * This should be used to allocate a new SPI device from an ACPI device node.
 * The caller is responsible for calling spi_add_device() to register the SPI
 * device.
 *
 * If ctlr is set to NULL, the controller for the SPI device will be looked up
 * using the resource.
 * If index is set to -1, index is not used.
 * Note: If index is -1, ctlr must be set.
2775 *
2776 * Return: a pointer to the new device, or ERR_PTR on error.
2777 */
2778struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2779 struct acpi_device *adev,
2780 int index)
2781{
2782 acpi_handle parent_handle = NULL;
2783 struct list_head resource_list;
2784 struct acpi_spi_lookup lookup = {};
2785 struct spi_device *spi;
2786 int ret;
2787 u8 idx;
2788
2789 if (!ctlr && index == -1)
2790 return ERR_PTR(-EINVAL);
2791
2792 lookup.ctlr = ctlr;
2793 lookup.irq = -1;
2794 lookup.index = index;
2795 lookup.n = 0;
2796
2797 INIT_LIST_HEAD(&resource_list);
2798 ret = acpi_dev_get_resources(adev, &resource_list,
2799 acpi_spi_add_resource, &lookup);
2800 acpi_dev_free_resource_list(&resource_list);
2801
2802 if (ret < 0)
2803 /* Found SPI in _CRS but it points to another controller */
2804 return ERR_PTR(ret);
2805
2806 if (!lookup.max_speed_hz &&
2807 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2808 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2809 /* Apple does not use _CRS but nested devices for SPI slaves */
2810 acpi_spi_parse_apple_properties(adev, &lookup);
2811 }
2812
2813 if (!lookup.max_speed_hz)
2814 return ERR_PTR(-ENODEV);
2815
2816 spi = spi_alloc_device(lookup.ctlr);
2817 if (!spi) {
2818 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2819 dev_name(&adev->dev));
2820 return ERR_PTR(-ENOMEM);
2821 }
2822
	/*
	 * Zero(0) is a valid physical CS value and can be located at any
	 * logical CS in the spi->chip_select[]. If all the physical CS
	 * were initialized to 0 it would be difficult to differentiate
	 * between a valid physical CS 0 and an unused logical CS whose
	 * physical CS can be 0. As a solution, initialize all the CS to
	 * 0xFF: all the unused logical CS will then have a 0xFF physical CS
	 * value and can be ignored while performing physical CS validity
	 * checks.
	 */
2832 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
2833 spi_set_chipselect(spi, idx, 0xFF);
2834
2835 ACPI_COMPANION_SET(&spi->dev, adev);
2836 spi->max_speed_hz = lookup.max_speed_hz;
2837 spi->mode |= lookup.mode;
2838 spi->irq = lookup.irq;
2839 spi->bits_per_word = lookup.bits_per_word;
2840 spi_set_chipselect(spi, 0, lookup.chip_select);
	/*
	 * spi->chip_select[i] gives the corresponding physical CS for logical
	 * CS i. A logical CS is represented by setting the ith bit in
	 * spi->cs_index_mask. So, for example, if spi->cs_index_mask = 0x01
	 * then the logical CS number is 0 and spi->chip_select[0] gives the
	 * physical CS.
	 * By default spi->chip_select[0] holds the physical CS number, so set
	 * spi->cs_index_mask to 0x01.
	 */
2849 spi->cs_index_mask = 0x01;
2850
2851 return spi;
2852}
2853EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
2854
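/*
 * Sketched use (hypothetical caller): allocate the device from an ACPI
 * node, optionally tweak it, then register it. Index 0 selects the first
 * SpiSerialBus resource of @adev; passing ctlr = NULL makes the lookup
 * find the controller from the resource itself.
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 0);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */
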
2855static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2856 struct acpi_device *adev)
2857{
2858 struct spi_device *spi;
2859
2860 if (acpi_bus_get_status(adev) || !adev->status.present ||
2861 acpi_device_enumerated(adev))
2862 return AE_OK;
2863
2864 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2865 if (IS_ERR(spi)) {
2866 if (PTR_ERR(spi) == -ENOMEM)
2867 return AE_NO_MEMORY;
2868 else
2869 return AE_OK;
2870 }
2871
2872 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2873 sizeof(spi->modalias));
2874
2875 if (spi->irq < 0)
2876 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2877
2878 acpi_device_set_enumerated(adev);
2879
2880 adev->power.flags.ignore_parent = true;
2881 if (spi_add_device(spi)) {
2882 adev->power.flags.ignore_parent = false;
2883 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2884 dev_name(&adev->dev));
2885 spi_dev_put(spi);
2886 }
2887
2888 return AE_OK;
2889}
2890
2891static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2892 void *data, void **return_value)
2893{
2894 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2895 struct spi_controller *ctlr = data;
2896
2897 if (!adev)
2898 return AE_OK;
2899
2900 return acpi_register_spi_device(ctlr, adev);
2901}
2902
2903#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2904
2905static void acpi_register_spi_devices(struct spi_controller *ctlr)
2906{
2907 acpi_status status;
2908 acpi_handle handle;
2909
2910 handle = ACPI_HANDLE(ctlr->dev.parent);
2911 if (!handle)
2912 return;
2913
2914 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2915 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2916 acpi_spi_add_device, NULL, ctlr, NULL);
2917 if (ACPI_FAILURE(status))
2918 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2919}
2920#else
2921static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2922#endif /* CONFIG_ACPI */
2923
2924static void spi_controller_release(struct device *dev)
2925{
2926 struct spi_controller *ctlr;
2927
2928 ctlr = container_of(dev, struct spi_controller, dev);
2929 kfree(ctlr);
2930}
2931
2932static struct class spi_master_class = {
2933 .name = "spi_master",
2934 .dev_release = spi_controller_release,
2935 .dev_groups = spi_master_groups,
2936};
2937
2938#ifdef CONFIG_SPI_SLAVE
2939/**
2940 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2941 * controller
2942 * @spi: device used for the current transfer
2943 */
2944int spi_slave_abort(struct spi_device *spi)
2945{
2946 struct spi_controller *ctlr = spi->controller;
2947
2948 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2949 return ctlr->slave_abort(ctlr);
2950
2951 return -ENOTSUPP;
2952}
2953EXPORT_SYMBOL_GPL(spi_slave_abort);
2954
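/**
 * spi_target_abort - abort the ongoing transfer request on an SPI target
 *		      controller
 * @spi: device used for the current transfer
 */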
2955int spi_target_abort(struct spi_device *spi)
2956{
2957 struct spi_controller *ctlr = spi->controller;
2958
2959 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2960 return ctlr->target_abort(ctlr);
2961
2962 return -ENOTSUPP;
2963}
2964EXPORT_SYMBOL_GPL(spi_target_abort);
2965
2966static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2967 char *buf)
2968{
2969 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2970 dev);
2971 struct device *child;
2972
2973 child = device_find_any_child(&ctlr->dev);
2974 return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2975}
2976
2977static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2978 const char *buf, size_t count)
2979{
2980 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2981 dev);
2982 struct spi_device *spi;
2983 struct device *child;
2984 char name[32];
2985 int rc;
2986
2987 rc = sscanf(buf, "%31s", name);
2988 if (rc != 1 || !name[0])
2989 return -EINVAL;
2990
2991 child = device_find_any_child(&ctlr->dev);
2992 if (child) {
2993 /* Remove registered slave */
2994 device_unregister(child);
2995 put_device(child);
2996 }
2997
2998 if (strcmp(name, "(null)")) {
2999 /* Register new slave */
3000 spi = spi_alloc_device(ctlr);
3001 if (!spi)
3002 return -ENOMEM;
3003
3004 strscpy(spi->modalias, name, sizeof(spi->modalias));
3005
3006 rc = spi_add_device(spi);
3007 if (rc) {
3008 spi_dev_put(spi);
3009 return rc;
3010 }
3011 }
3012
3013 return count;
3014}
3015
3016static DEVICE_ATTR_RW(slave);
3017
3018static struct attribute *spi_slave_attrs[] = {
3019 &dev_attr_slave.attr,
3020 NULL,
3021};
3022
3023static const struct attribute_group spi_slave_group = {
3024 .attrs = spi_slave_attrs,
3025};
3026
3027static const struct attribute_group *spi_slave_groups[] = {
3028 &spi_controller_statistics_group,
3029 &spi_slave_group,
3030 NULL,
3031};
3032
3033static struct class spi_slave_class = {
3034 .name = "spi_slave",
3035 .dev_release = spi_controller_release,
3036 .dev_groups = spi_slave_groups,
3037};
3038#else
3039extern struct class spi_slave_class; /* dummy */
3040#endif
3041
3042/**
3043 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the physical device managing this controller, possibly using
 *	the platform_bus
3045 * @size: how much zeroed driver-private data to allocate; the pointer to this
3046 * memory is in the driver_data field of the returned device, accessible
3047 * with spi_controller_get_devdata(); the memory is cacheline aligned;
3048 * drivers granting DMA access to portions of their private data need to
3049 * round up @size using ALIGN(size, dma_get_cache_alignment()).
3050 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
3051 * slave (true) controller
3052 * Context: can sleep
3053 *
3054 * This call is used only by SPI controller drivers, which are the
3055 * only ones directly touching chip registers. It's how they allocate
3056 * an spi_controller structure, prior to calling spi_register_controller().
3057 *
3058 * This must be called from context that can sleep.
3059 *
3060 * The caller is responsible for assigning the bus number and initializing the
3061 * controller's methods before calling spi_register_controller(); and (after
3062 * errors adding the device) calling spi_controller_put() to prevent a memory
3063 * leak.
3064 *
3065 * Return: the SPI controller structure on success, else NULL.
3066 */
3067struct spi_controller *__spi_alloc_controller(struct device *dev,
3068 unsigned int size, bool slave)
3069{
3070 struct spi_controller *ctlr;
3071 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3072
3073 if (!dev)
3074 return NULL;
3075
3076 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3077 if (!ctlr)
3078 return NULL;
3079
3080 device_initialize(&ctlr->dev);
3081 INIT_LIST_HEAD(&ctlr->queue);
3082 spin_lock_init(&ctlr->queue_lock);
3083 spin_lock_init(&ctlr->bus_lock_spinlock);
3084 mutex_init(&ctlr->bus_lock_mutex);
3085 mutex_init(&ctlr->io_mutex);
3086 mutex_init(&ctlr->add_lock);
3087 ctlr->bus_num = -1;
3088 ctlr->num_chipselect = 1;
3089 ctlr->slave = slave;
3090 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
3091 ctlr->dev.class = &spi_slave_class;
3092 else
3093 ctlr->dev.class = &spi_master_class;
3094 ctlr->dev.parent = dev;
3095 pm_suspend_ignore_children(&ctlr->dev, true);
3096 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3097
3098 return ctlr;
3099}
3100EXPORT_SYMBOL_GPL(__spi_alloc_controller);
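
/*
 * Example (illustrative sketch, not part of this API): a controller driver
 * usually reaches this through the spi_alloc_master() wrapper and then
 * retrieves its private data. All foo_* names are hypothetical.
 *
 *	struct spi_controller *ctlr;
 *	struct foo_priv *priv;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 *	ctlr->num_chipselect = 2;
 *	ctlr->transfer_one = foo_transfer_one;
 */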
3101
3102static void devm_spi_release_controller(struct device *dev, void *ctlr)
3103{
3104 spi_controller_put(*(struct spi_controller **)ctlr);
3105}
3106
3107/**
3108 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3109 * @dev: physical device of SPI controller
3110 * @size: how much zeroed driver-private data to allocate
3111 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
3112 * Context: can sleep
3113 *
3114 * Allocate an SPI controller and automatically release a reference on it
3115 * when @dev is unbound from its driver. Drivers are thus relieved from
3116 * having to call spi_controller_put().
3117 *
3118 * The arguments to this function are identical to __spi_alloc_controller().
3119 *
3120 * Return: the SPI controller structure on success, else NULL.
3121 */
3122struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3123 unsigned int size,
3124 bool slave)
3125{
3126 struct spi_controller **ptr, *ctlr;
3127
3128 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3129 GFP_KERNEL);
3130 if (!ptr)
3131 return NULL;
3132
3133 ctlr = __spi_alloc_controller(dev, size, slave);
3134 if (ctlr) {
3135 ctlr->devm_allocated = true;
3136 *ptr = ctlr;
3137 devres_add(dev, ptr);
3138 } else {
3139 devres_free(ptr);
3140 }
3141
3142 return ctlr;
3143}
3144EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
3145
3146/**
3147 * spi_get_gpio_descs() - grab chip select GPIOs for the master
3148 * @ctlr: The SPI master to grab GPIO descriptors for
3149 */
3150static int spi_get_gpio_descs(struct spi_controller *ctlr)
3151{
3152 int nb, i;
3153 struct gpio_desc **cs;
3154 struct device *dev = &ctlr->dev;
3155 unsigned long native_cs_mask = 0;
3156 unsigned int num_cs_gpios = 0;
3157
3158 nb = gpiod_count(dev, "cs");
3159 if (nb < 0) {
3160 /* No GPIOs at all is fine, else return the error */
3161 if (nb == -ENOENT)
3162 return 0;
3163 return nb;
3164 }
3165
3166 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3167
3168 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3169 GFP_KERNEL);
3170 if (!cs)
3171 return -ENOMEM;
3172 ctlr->cs_gpiods = cs;
3173
3174 for (i = 0; i < nb; i++) {
		/*
		 * Most chipselects are active low; the inverted
		 * semantics are handled by special quirks in gpiolib,
		 * so initializing them to GPIOD_OUT_LOW here means
		 * "unasserted". In most cases this will drive the
		 * physical line high.
		 */
3182 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3183 GPIOD_OUT_LOW);
3184 if (IS_ERR(cs[i]))
3185 return PTR_ERR(cs[i]);
3186
3187 if (cs[i]) {
3188 /*
3189 * If we find a CS GPIO, name it after the device and
3190 * chip select line.
3191 */
3192 char *gpioname;
3193
3194 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3195 dev_name(dev), i);
3196 if (!gpioname)
3197 return -ENOMEM;
3198 gpiod_set_consumer_name(cs[i], gpioname);
3199 num_cs_gpios++;
3200 continue;
3201 }
3202
3203 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3204 dev_err(dev, "Invalid native chip select %d\n", i);
3205 return -EINVAL;
3206 }
3207 native_cs_mask |= BIT(i);
3208 }
3209
3210 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3211
3212 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3213 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3214 dev_err(dev, "No unused native chip select available\n");
3215 return -EINVAL;
3216 }
3217
3218 return 0;
3219}
3220
3221static int spi_controller_check_ops(struct spi_controller *ctlr)
3222{
	/*
	 * The controller may implement only the high-level SPI-memory-like
	 * operations if it does not support regular SPI transfers, and this
	 * is a valid use case.
	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
	 * one of the ->transfer_xxx() methods be implemented.
	 */
3230 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3231 if (!ctlr->transfer && !ctlr->transfer_one &&
3232 !ctlr->transfer_one_message) {
3233 return -EINVAL;
3234 }
3235 }
3236
3237 return 0;
3238}
3239
/* Allocate a dynamic bus number using the Linux IDR */
3241static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3242{
3243 int id;
3244
3245 mutex_lock(&board_lock);
3246 id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3247 mutex_unlock(&board_lock);
3248 if (WARN(id < 0, "couldn't get idr"))
3249 return id == -ENOSPC ? -EBUSY : id;
3250 ctlr->bus_num = id;
3251 return 0;
3252}
3253
3254/**
3255 * spi_register_controller - register SPI master or slave controller
3256 * @ctlr: initialized master, originally from spi_alloc_master() or
3257 * spi_alloc_slave()
3258 * Context: can sleep
3259 *
3260 * SPI controllers connect to their drivers using some non-SPI bus,
3261 * such as the platform bus. The final stage of probe() in that code
3262 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3263 *
 * SPI controllers use board-specific (often SoC-specific) bus numbers,
3265 * and board-specific addressing for SPI devices combines those numbers
3266 * with chip select numbers. Since SPI does not directly support dynamic
3267 * device identification, boards need configuration tables telling which
3268 * chip is at which address.
3269 *
3270 * This must be called from context that can sleep. It returns zero on
3271 * success, else a negative error code (dropping the controller's refcount).
3272 * After a successful return, the caller is responsible for calling
3273 * spi_unregister_controller().
3274 *
3275 * Return: zero on success, else a negative error code.
3276 */
3277int spi_register_controller(struct spi_controller *ctlr)
3278{
3279 struct device *dev = ctlr->dev.parent;
3280 struct boardinfo *bi;
3281 int first_dynamic;
3282 int status;
3283 int idx;
3284
3285 if (!dev)
3286 return -ENODEV;
3287
3288 /*
3289 * Make sure all necessary hooks are implemented before registering
3290 * the SPI controller.
3291 */
3292 status = spi_controller_check_ops(ctlr);
3293 if (status)
3294 return status;
3295
3296 if (ctlr->bus_num < 0)
3297 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3298 if (ctlr->bus_num >= 0) {
		/* Controllers with a fixed bus number must claim that exact number */
3300 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3301 if (status)
3302 return status;
3303 }
3304 if (ctlr->bus_num < 0) {
3305 first_dynamic = of_alias_get_highest_id("spi");
3306 if (first_dynamic < 0)
3307 first_dynamic = 0;
3308 else
3309 first_dynamic++;
3310
3311 status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3312 if (status)
3313 return status;
3314 }
3315 ctlr->bus_lock_flag = 0;
3316 init_completion(&ctlr->xfer_completion);
3317 init_completion(&ctlr->cur_msg_completion);
3318 if (!ctlr->max_dma_len)
3319 ctlr->max_dma_len = INT_MAX;
3320
3321 /*
3322 * Register the device, then userspace will see it.
3323 * Registration fails if the bus ID is in use.
3324 */
3325 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3326
3327 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3328 status = spi_get_gpio_descs(ctlr);
3329 if (status)
3330 goto free_bus_id;
3331 /*
3332 * A controller using GPIO descriptors always
3333 * supports SPI_CS_HIGH if need be.
3334 */
3335 ctlr->mode_bits |= SPI_CS_HIGH;
3336 }
3337
3338 /*
3339 * Even if it's just one always-selected device, there must
3340 * be at least one chipselect.
3341 */
3342 if (!ctlr->num_chipselect) {
3343 status = -EINVAL;
3344 goto free_bus_id;
3345 }
3346
3347 /* Setting last_cs to -1 means no chip selected */
3348 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3349 ctlr->last_cs[idx] = -1;
3350
3351 status = device_add(&ctlr->dev);
3352 if (status < 0)
3353 goto free_bus_id;
3354 dev_dbg(dev, "registered %s %s\n",
3355 spi_controller_is_slave(ctlr) ? "slave" : "master",
3356 dev_name(&ctlr->dev));
3357
3358 /*
3359 * If we're using a queued driver, start the queue. Note that we don't
3360 * need the queueing logic if the driver is only supporting high-level
3361 * memory operations.
3362 */
3363 if (ctlr->transfer) {
3364 dev_info(dev, "controller is unqueued, this is deprecated\n");
3365 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3366 status = spi_controller_initialize_queue(ctlr);
3367 if (status) {
3368 device_del(&ctlr->dev);
3369 goto free_bus_id;
3370 }
3371 }
3372 /* Add statistics */
3373 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3374 if (!ctlr->pcpu_statistics) {
3375 dev_err(dev, "Error allocating per-cpu statistics\n");
3376 status = -ENOMEM;
3377 goto destroy_queue;
3378 }
3379
3380 mutex_lock(&board_lock);
3381 list_add_tail(&ctlr->list, &spi_controller_list);
3382 list_for_each_entry(bi, &board_list, list)
3383 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3384 mutex_unlock(&board_lock);
3385
3386 /* Register devices from the device tree and ACPI */
3387 of_register_spi_devices(ctlr);
3388 acpi_register_spi_devices(ctlr);
3389 return status;
3390
3391destroy_queue:
3392 spi_destroy_queue(ctlr);
3393free_bus_id:
3394 mutex_lock(&board_lock);
3395 idr_remove(&spi_master_idr, ctlr->bus_num);
3396 mutex_unlock(&board_lock);
3397 return status;
3398}
3399EXPORT_SYMBOL_GPL(spi_register_controller);
3400
3401static void devm_spi_unregister(struct device *dev, void *res)
3402{
3403 spi_unregister_controller(*(struct spi_controller **)res);
3404}
3405
3406/**
3407 * devm_spi_register_controller - register managed SPI master or slave
3408 * controller
3409 * @dev: device managing SPI controller
3410 * @ctlr: initialized controller, originally from spi_alloc_master() or
3411 * spi_alloc_slave()
3412 * Context: can sleep
3413 *
 * Register an SPI controller as with spi_register_controller(); it will
 * automatically be unregistered and freed when @dev is unbound from its
 * driver.
3416 *
3417 * Return: zero on success, else a negative error code.
3418 */
3419int devm_spi_register_controller(struct device *dev,
3420 struct spi_controller *ctlr)
3421{
3422 struct spi_controller **ptr;
3423 int ret;
3424
3425 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3426 if (!ptr)
3427 return -ENOMEM;
3428
3429 ret = spi_register_controller(ctlr);
3430 if (!ret) {
3431 *ptr = ctlr;
3432 devres_add(dev, ptr);
3433 } else {
3434 devres_free(ptr);
3435 }
3436
3437 return ret;
3438}
3439EXPORT_SYMBOL_GPL(devm_spi_register_controller);
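
/*
 * Example: a minimal probe() sketch combining the devm allocation and
 * registration helpers; foo_* names are hypothetical and error handling
 * is reduced to the essentials.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev,
 *					     sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *		ctlr->transfer_one = foo_transfer_one;
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */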
3440
3441static int __unregister(struct device *dev, void *null)
3442{
3443 spi_unregister_device(to_spi_device(dev));
3444 return 0;
3445}
3446
3447/**
3448 * spi_unregister_controller - unregister SPI master or slave controller
3449 * @ctlr: the controller being unregistered
3450 * Context: can sleep
3451 *
3452 * This call is used only by SPI controller drivers, which are the
3453 * only ones directly touching chip registers.
3454 *
3455 * This must be called from context that can sleep.
3456 *
3457 * Note that this function also drops a reference to the controller.
3458 */
3459void spi_unregister_controller(struct spi_controller *ctlr)
3460{
3461 struct spi_controller *found;
3462 int id = ctlr->bus_num;
3463
3464 /* Prevent addition of new devices, unregister existing ones */
3465 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3466 mutex_lock(&ctlr->add_lock);
3467
3468 device_for_each_child(&ctlr->dev, NULL, __unregister);
3469
3470 /* First make sure that this controller was ever added */
3471 mutex_lock(&board_lock);
3472 found = idr_find(&spi_master_idr, id);
3473 mutex_unlock(&board_lock);
3474 if (ctlr->queued) {
3475 if (spi_destroy_queue(ctlr))
3476 dev_err(&ctlr->dev, "queue remove failed\n");
3477 }
3478 mutex_lock(&board_lock);
3479 list_del(&ctlr->list);
3480 mutex_unlock(&board_lock);
3481
3482 device_del(&ctlr->dev);
3483
3484 /* Free bus id */
3485 mutex_lock(&board_lock);
3486 if (found == ctlr)
3487 idr_remove(&spi_master_idr, id);
3488 mutex_unlock(&board_lock);
3489
3490 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3491 mutex_unlock(&ctlr->add_lock);
3492
3493 /*
3494 * Release the last reference on the controller if its driver
3495 * has not yet been converted to devm_spi_alloc_master/slave().
3496 */
3497 if (!ctlr->devm_allocated)
3498 put_device(&ctlr->dev);
3499}
3500EXPORT_SYMBOL_GPL(spi_unregister_controller);
3501
3502static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3503{
3504 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3505}
3506
3507static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3508{
3509 mutex_lock(&ctlr->bus_lock_mutex);
3510 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3511 mutex_unlock(&ctlr->bus_lock_mutex);
3512}
3513
3514static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3515{
3516 mutex_lock(&ctlr->bus_lock_mutex);
3517 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3518 mutex_unlock(&ctlr->bus_lock_mutex);
3519}
3520
3521int spi_controller_suspend(struct spi_controller *ctlr)
3522{
3523 int ret = 0;
3524
3525 /* Basically no-ops for non-queued controllers */
3526 if (ctlr->queued) {
3527 ret = spi_stop_queue(ctlr);
3528 if (ret)
3529 dev_err(&ctlr->dev, "queue stop failed\n");
3530 }
3531
3532 __spi_mark_suspended(ctlr);
3533 return ret;
3534}
3535EXPORT_SYMBOL_GPL(spi_controller_suspend);
3536
3537int spi_controller_resume(struct spi_controller *ctlr)
3538{
3539 int ret = 0;
3540
3541 __spi_mark_resumed(ctlr);
3542
3543 if (ctlr->queued) {
3544 ret = spi_start_queue(ctlr);
3545 if (ret)
3546 dev_err(&ctlr->dev, "queue restart failed\n");
3547 }
3548 return ret;
3549}
3550EXPORT_SYMBOL_GPL(spi_controller_resume);
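
/*
 * Example: a sketch of how a controller driver might wire these helpers
 * into its system PM callbacks (foo_* names are hypothetical, and the
 * controller is assumed to be stored as driver data).
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);
 *	}
 */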
3551
3552/*-------------------------------------------------------------------------*/
3553
3554/* Core methods for spi_message alterations */
3555
3556static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3557 struct spi_message *msg,
3558 void *res)
3559{
3560 struct spi_replaced_transfers *rxfer = res;
3561 size_t i;
3562
3563 /* Call extra callback if requested */
3564 if (rxfer->release)
3565 rxfer->release(ctlr, msg, res);
3566
3567 /* Insert replaced transfers back into the message */
3568 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3569
3570 /* Remove the formerly inserted entries */
3571 for (i = 0; i < rxfer->inserted; i++)
3572 list_del(&rxfer->inserted_transfers[i].transfer_list);
3573}
3574
3575/**
3576 * spi_replace_transfers - replace transfers with several transfers
3577 * and register change with spi_message.resources
3578 * @msg: the spi_message we work upon
3579 * @xfer_first: the first spi_transfer we want to replace
3580 * @remove: number of transfers to remove
3581 * @insert: the number of transfers we want to insert instead
3582 * @release: extra release code necessary in some circumstances
3583 * @extradatasize: extra data to allocate (with alignment guarantees
3584 * of struct @spi_transfer)
3585 * @gfp: gfp flags
3586 *
 * Return: pointer to the new struct spi_replaced_transfers,
 *	   or an ERR_PTR() in case of errors.
3589 */
3590static struct spi_replaced_transfers *spi_replace_transfers(
3591 struct spi_message *msg,
3592 struct spi_transfer *xfer_first,
3593 size_t remove,
3594 size_t insert,
3595 spi_replaced_release_t release,
3596 size_t extradatasize,
3597 gfp_t gfp)
3598{
3599 struct spi_replaced_transfers *rxfer;
3600 struct spi_transfer *xfer;
3601 size_t i;
3602
3603 /* Allocate the structure using spi_res */
3604 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3605 struct_size(rxfer, inserted_transfers, insert)
3606 + extradatasize,
3607 gfp);
3608 if (!rxfer)
3609 return ERR_PTR(-ENOMEM);
3610
3611 /* The release code to invoke before running the generic release */
3612 rxfer->release = release;
3613
3614 /* Assign extradata */
3615 if (extradatasize)
3616 rxfer->extradata =
3617 &rxfer->inserted_transfers[insert];
3618
3619 /* Init the replaced_transfers list */
3620 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3621
	/*
	 * Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.transfers!
	 */
3626 rxfer->replaced_after = xfer_first->transfer_list.prev;
3627
3628 /* Remove the requested number of transfers */
3629 for (i = 0; i < remove; i++) {
		/*
		 * If the entry after replaced_after is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list.
		 */
3635 if (rxfer->replaced_after->next == &msg->transfers) {
3636 dev_err(&msg->spi->dev,
3637 "requested to remove more spi_transfers than are available\n");
3638 /* Insert replaced transfers back into the message */
3639 list_splice(&rxfer->replaced_transfers,
3640 rxfer->replaced_after);
3641
3642 /* Free the spi_replace_transfer structure... */
3643 spi_res_free(rxfer);
3644
3645 /* ...and return with an error */
3646 return ERR_PTR(-EINVAL);
3647 }
3648
3649 /*
3650 * Remove the entry after replaced_after from list of
3651 * transfers and add it to list of replaced_transfers.
3652 */
3653 list_move_tail(rxfer->replaced_after->next,
3654 &rxfer->replaced_transfers);
3655 }
3656
	/*
	 * Create copies of the given xfer with identical settings,
	 * based on the first transfer to get removed.
	 */
3661 for (i = 0; i < insert; i++) {
3662 /* We need to run in reverse order */
3663 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3664
3665 /* Copy all spi_transfer data */
3666 memcpy(xfer, xfer_first, sizeof(*xfer));
3667
3668 /* Add to list */
3669 list_add(&xfer->transfer_list, rxfer->replaced_after);
3670
3671 /* Clear cs_change and delay for all but the last */
3672 if (i) {
3673 xfer->cs_change = false;
3674 xfer->delay.value = 0;
3675 }
3676 }
3677
3678 /* Set up inserted... */
3679 rxfer->inserted = insert;
3680
3681 /* ...and register it with spi_res/spi_message */
3682 spi_res_add(msg, rxfer);
3683
3684 return rxfer;
3685}
3686
3687static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3688 struct spi_message *msg,
3689 struct spi_transfer **xferp,
3690 size_t maxsize,
3691 gfp_t gfp)
3692{
3693 struct spi_transfer *xfer = *xferp, *xfers;
3694 struct spi_replaced_transfers *srt;
3695 size_t offset;
3696 size_t count, i;
3697
3698 /* Calculate how many we have to replace */
3699 count = DIV_ROUND_UP(xfer->len, maxsize);
3700
3701 /* Create replacement */
3702 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3703 if (IS_ERR(srt))
3704 return PTR_ERR(srt);
3705 xfers = srt->inserted_transfers;
3706
	/*
	 * Now handle each of those newly inserted spi_transfers.
	 * Note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others), and we just
	 * have to fix up len and the pointers.
	 *
	 * This also includes support for the deprecated
	 * spi_message.is_dma_mapped interface.
	 */
3717
3718 /*
3719 * The first transfer just needs the length modified, so we
3720 * run it outside the loop.
3721 */
3722 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3723
3724 /* All the others need rx_buf/tx_buf also set */
3725 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3726 /* Update rx_buf, tx_buf and DMA */
3727 if (xfers[i].rx_buf)
3728 xfers[i].rx_buf += offset;
3729 if (xfers[i].rx_dma)
3730 xfers[i].rx_dma += offset;
3731 if (xfers[i].tx_buf)
3732 xfers[i].tx_buf += offset;
3733 if (xfers[i].tx_dma)
3734 xfers[i].tx_dma += offset;
3735
3736 /* Update length */
3737 xfers[i].len = min(maxsize, xfers[i].len - offset);
3738 }
3739
3740 /*
3741 * We set up xferp to the last entry we have inserted,
3742 * so that we skip those already split transfers.
3743 */
3744 *xferp = &xfers[count - 1];
3745
3746 /* Increment statistics counters */
3747 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3748 transfers_split_maxsize);
3749 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3750 transfers_split_maxsize);
3751
3752 return 0;
3753}
3754
3755/**
3756 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3757 * when an individual transfer exceeds a
3758 * certain size
3759 * @ctlr: the @spi_controller for this transfer
3760 * @msg: the @spi_message to transform
 * @maxsize: the maximum size, in bytes, above which a transfer is split
3762 * @gfp: GFP allocation flags
3763 *
3764 * Return: status of transformation
3765 */
3766int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3767 struct spi_message *msg,
3768 size_t maxsize,
3769 gfp_t gfp)
3770{
3771 struct spi_transfer *xfer;
3772 int ret;
3773
	/*
	 * Iterate over the transfer_list, but note that xfer is advanced
	 * to the last transfer inserted to avoid checking sizes again
	 * unnecessarily (also, xfer may belong to a different list by the
	 * time the replacement has happened).
	 */
3781 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3782 if (xfer->len > maxsize) {
3783 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3784 maxsize, gfp);
3785 if (ret)
3786 return ret;
3787 }
3788 }
3789
3790 return 0;
3791}
3792EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
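
/*
 * Example: a sketch of how a controller driver with a hardware transfer
 * length limit (an assumed 64 KiB here) might use this from its
 * prepare_message() hook.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, SZ_64K,
 *						   GFP_KERNEL);
 *	}
 */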
3793
3794
3795/**
3796 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3797 * when an individual transfer exceeds a
3798 * certain number of SPI words
3799 * @ctlr: the @spi_controller for this transfer
3800 * @msg: the @spi_message to transform
3801 * @maxwords: the number of words to limit each transfer to
3802 * @gfp: GFP allocation flags
3803 *
3804 * Return: status of transformation
3805 */
3806int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3807 struct spi_message *msg,
3808 size_t maxwords,
3809 gfp_t gfp)
3810{
3811 struct spi_transfer *xfer;
3812
	/*
	 * Iterate over the transfer_list, but note that xfer is advanced
	 * to the last transfer inserted to avoid checking sizes again
	 * unnecessarily (also, xfer may belong to a different list by the
	 * time the replacement has happened).
	 */
3820 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3821 size_t maxsize;
3822 int ret;
3823
3824 maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3825 if (xfer->len > maxsize) {
3826 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3827 maxsize, gfp);
3828 if (ret)
3829 return ret;
3830 }
3831 }
3832
3833 return 0;
3834}
3835EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
3836
3837/*-------------------------------------------------------------------------*/
3838
3839/*
3840 * Core methods for SPI controller protocol drivers. Some of the
3841 * other core methods are currently defined as inline functions.
3842 */
3843
3844static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3845 u8 bits_per_word)
3846{
3847 if (ctlr->bits_per_word_mask) {
3848 /* Only 32 bits fit in the mask */
3849 if (bits_per_word > 32)
3850 return -EINVAL;
3851 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3852 return -EINVAL;
3853 }
3854
3855 return 0;
3856}
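
/*
 * For reference, controller drivers declare their supported word sizes
 * with the SPI_BPW_MASK()/SPI_BPW_RANGE_MASK() helpers, e.g.
 * (illustrative values):
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 */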
3857
3858/**
3859 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3860 * @spi: the device that requires specific CS timing configuration
3861 *
3862 * Return: zero on success, else a negative error code.
3863 */
3864static int spi_set_cs_timing(struct spi_device *spi)
3865{
3866 struct device *parent = spi->controller->dev.parent;
3867 int status = 0;
3868
3869 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3870 if (spi->controller->auto_runtime_pm) {
3871 status = pm_runtime_get_sync(parent);
3872 if (status < 0) {
3873 pm_runtime_put_noidle(parent);
3874 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3875 status);
3876 return status;
3877 }
3878
3879 status = spi->controller->set_cs_timing(spi);
3880 pm_runtime_mark_last_busy(parent);
3881 pm_runtime_put_autosuspend(parent);
3882 } else {
3883 status = spi->controller->set_cs_timing(spi);
3884 }
3885 }
3886 return status;
3887}
3888
3889/**
3890 * spi_setup - setup SPI mode and clock rate
3891 * @spi: the device whose settings are being modified
3892 * Context: can sleep, and no requests are queued to the device
3893 *
3894 * SPI protocol drivers may need to update the transfer mode if the
3895 * device doesn't work with its default. They may likewise need
3896 * to update clock rates or word sizes from initial values. This function
3897 * changes those settings, and must be called from a context that can sleep.
3898 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3899 * effect the next time the device is selected and data is transferred to
3900 * or from it. When this function returns, the SPI device is deselected.
3901 *
3902 * Note that this call will fail if the protocol driver specifies an option
3903 * that the underlying controller or its driver does not support. For
3904 * example, not all hardware supports wire transfers using nine bit words,
3905 * LSB-first wire encoding, or active-high chipselects.
3906 *
3907 * Return: zero on success, else a negative error code.
3908 */
3909int spi_setup(struct spi_device *spi)
3910{
3911 unsigned bad_bits, ugly_bits;
3912 int status = 0;
3913
	/*
	 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
	 * being set at the same time.
	 */
3918 if ((hweight_long(spi->mode &
3919 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3920 (hweight_long(spi->mode &
3921 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3922 dev_err(&spi->dev,
			"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3924 return -EINVAL;
3925 }
	/* In SPI_3WIRE mode, DUAL and QUAD modes are forbidden */
3927 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3928 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3929 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3930 return -EINVAL;
3931 /*
3932 * Help drivers fail *cleanly* when they need options
3933 * that aren't supported with their current controller.
3934 * SPI_CS_WORD has a fallback software implementation,
3935 * so it is ignored here.
3936 */
3937 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3938 SPI_NO_TX | SPI_NO_RX);
3939 ugly_bits = bad_bits &
3940 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3941 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3942 if (ugly_bits) {
3943 dev_warn(&spi->dev,
3944 "setup: ignoring unsupported mode bits %x\n",
3945 ugly_bits);
3946 spi->mode &= ~ugly_bits;
3947 bad_bits &= ~ugly_bits;
3948 }
3949 if (bad_bits) {
3950 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3951 bad_bits);
3952 return -EINVAL;
3953 }
3954
3955 if (!spi->bits_per_word) {
3956 spi->bits_per_word = 8;
3957 } else {
		/*
		 * Some controllers may not support the default 8 bits-per-word
		 * so only perform the check when a value is explicitly provided.
		 */
3962 status = __spi_validate_bits_per_word(spi->controller,
3963 spi->bits_per_word);
3964 if (status)
3965 return status;
3966 }
3967
3968 if (spi->controller->max_speed_hz &&
3969 (!spi->max_speed_hz ||
3970 spi->max_speed_hz > spi->controller->max_speed_hz))
3971 spi->max_speed_hz = spi->controller->max_speed_hz;
3972
3973 mutex_lock(&spi->controller->io_mutex);
3974
3975 if (spi->controller->setup) {
3976 status = spi->controller->setup(spi);
3977 if (status) {
3978 mutex_unlock(&spi->controller->io_mutex);
3979 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3980 status);
3981 return status;
3982 }
3983 }
3984
3985 status = spi_set_cs_timing(spi);
3986 if (status) {
3987 mutex_unlock(&spi->controller->io_mutex);
3988 return status;
3989 }
3990
3991 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3992 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3993 if (status < 0) {
3994 mutex_unlock(&spi->controller->io_mutex);
3995 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3996 status);
3997 return status;
3998 }
3999
		/*
		 * We do not want to return a positive value from pm_runtime_get,
		 * there are many instances of devices calling spi_setup() and
		 * checking for a non-zero return value instead of a negative
		 * return value.
		 */
4006 status = 0;
4007
4008 spi_set_cs(spi, false, true);
4009 pm_runtime_mark_last_busy(spi->controller->dev.parent);
4010 pm_runtime_put_autosuspend(spi->controller->dev.parent);
4011 } else {
4012 spi_set_cs(spi, false, true);
4013 }
4014
4015 mutex_unlock(&spi->controller->io_mutex);
4016
4017 if (spi->rt && !spi->controller->rt) {
4018 spi->controller->rt = true;
4019 spi_set_thread_rt(spi->controller);
4020 }
4021
4022 trace_spi_setup(spi, status);
4023
4024 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4025 spi->mode & SPI_MODE_X_MASK,
4026 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4027 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4028 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
4029 (spi->mode & SPI_LOOP) ? "loopback, " : "",
4030 spi->bits_per_word, spi->max_speed_hz,
4031 status);
4032
4033 return status;
4034}
4035EXPORT_SYMBOL_GPL(spi_setup);
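
/*
 * Example: a sketch of a protocol driver adjusting its settings at probe
 * time; the values shown are illustrative only.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 */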
4036
4037static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4038 struct spi_device *spi)
4039{
4040 int delay1, delay2;
4041
4042 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4043 if (delay1 < 0)
4044 return delay1;
4045
4046 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4047 if (delay2 < 0)
4048 return delay2;
4049
4050 if (delay1 < delay2)
4051 memcpy(&xfer->word_delay, &spi->word_delay,
4052 sizeof(xfer->word_delay));
4053
4054 return 0;
4055}
4056
4057static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4058{
4059 struct spi_controller *ctlr = spi->controller;
4060 struct spi_transfer *xfer;
4061 int w_size;
4062
4063 if (list_empty(&message->transfers))
4064 return -EINVAL;
4065
4066 /*
4067 * If an SPI controller does not support toggling the CS line on each
4068 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4069 * for the CS line, we can emulate the CS-per-word hardware function by
4070 * splitting transfers into one-word transfers and ensuring that
4071 * cs_change is set for each transfer.
4072 */
4073 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
4074 spi_is_csgpiod(spi))) {
4075 size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
4076 int ret;
4077
4078 /* spi_split_transfers_maxsize() requires message->spi */
4079 message->spi = spi;
4080
4081 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
4082 GFP_KERNEL);
4083 if (ret)
4084 return ret;
4085
4086 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4087 /* Don't change cs_change on the last entry in the list */
4088 if (list_is_last(&xfer->transfer_list, &message->transfers))
4089 break;
4090 xfer->cs_change = 1;
4091 }
4092 }
4093
	/*
	 * Half-duplex links include the original MicroWire, links with
	 * only one data pin like SPI_3WIRE (which switches direction),
	 * and links where either MOSI or MISO is missing. They can also
	 * result from software limitations.
	 */
4100 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4101 (spi->mode & SPI_3WIRE)) {
4102 unsigned flags = ctlr->flags;
4103
4104 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4105 if (xfer->rx_buf && xfer->tx_buf)
4106 return -EINVAL;
4107 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4108 return -EINVAL;
4109 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4110 return -EINVAL;
4111 }
4112 }
4113
	/*
	 * Set transfer bits_per_word and max speed to the SPI device defaults
	 * if they are not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits to the single-transfer default
	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
	 * Ensure transfer word_delay is at least as long as that required by
	 * the device itself.
	 */
4122 message->frame_length = 0;
4123 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4124 xfer->effective_speed_hz = 0;
4125 message->frame_length += xfer->len;
4126 if (!xfer->bits_per_word)
4127 xfer->bits_per_word = spi->bits_per_word;
4128
4129 if (!xfer->speed_hz)
4130 xfer->speed_hz = spi->max_speed_hz;
4131
4132 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4133 xfer->speed_hz = ctlr->max_speed_hz;
4134
4135 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4136 return -EINVAL;
4137
		/*
		 * The SPI transfer length must be a multiple of the SPI word
		 * size, where the word size is rounded up to a power-of-two
		 * number of bytes.
		 */
4142 if (xfer->bits_per_word <= 8)
4143 w_size = 1;
4144 else if (xfer->bits_per_word <= 16)
4145 w_size = 2;
4146 else
4147 w_size = 4;
4148
4149 /* No partial transfers accepted */
4150 if (xfer->len % w_size)
4151 return -EINVAL;
4152
4153 if (xfer->speed_hz && ctlr->min_speed_hz &&
4154 xfer->speed_hz < ctlr->min_speed_hz)
4155 return -EINVAL;
4156
4157 if (xfer->tx_buf && !xfer->tx_nbits)
4158 xfer->tx_nbits = SPI_NBITS_SINGLE;
4159 if (xfer->rx_buf && !xfer->rx_nbits)
4160 xfer->rx_nbits = SPI_NBITS_SINGLE;
4161 /*
4162 * Check transfer tx/rx_nbits:
4163 * 1. check the value matches one of single, dual and quad
4164 * 2. check tx/rx_nbits match the mode in spi_device
4165 */
4166 if (xfer->tx_buf) {
4167 if (spi->mode & SPI_NO_TX)
4168 return -EINVAL;
4169 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4170 xfer->tx_nbits != SPI_NBITS_DUAL &&
4171 xfer->tx_nbits != SPI_NBITS_QUAD)
4172 return -EINVAL;
4173 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4174 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4175 return -EINVAL;
4176 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4177 !(spi->mode & SPI_TX_QUAD))
4178 return -EINVAL;
4179 }
4180 /* Check transfer rx_nbits */
4181 if (xfer->rx_buf) {
4182 if (spi->mode & SPI_NO_RX)
4183 return -EINVAL;
4184 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4185 xfer->rx_nbits != SPI_NBITS_DUAL &&
4186 xfer->rx_nbits != SPI_NBITS_QUAD)
4187 return -EINVAL;
4188 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4189 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4190 return -EINVAL;
4191 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4192 !(spi->mode & SPI_RX_QUAD))
4193 return -EINVAL;
4194 }
4195
4196 if (_spi_xfer_word_delay_update(xfer, spi))
4197 return -EINVAL;
4198 }
4199
4200 message->status = -EINPROGRESS;
4201
4202 return 0;
4203}
4204
4205static int __spi_async(struct spi_device *spi, struct spi_message *message)
4206{
4207 struct spi_controller *ctlr = spi->controller;
4208 struct spi_transfer *xfer;
4209
4210 /*
4211 * Some controllers do not support doing regular SPI transfers. Return
4212 * ENOTSUPP when this is the case.
4213 */
4214 if (!ctlr->transfer)
4215 return -ENOTSUPP;
4216
4217 message->spi = spi;
4218
4219 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4220 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4221
4222 trace_spi_message_submit(message);
4223
4224 if (!ctlr->ptp_sts_supported) {
4225 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4226 xfer->ptp_sts_word_pre = 0;
4227 ptp_read_system_prets(xfer->ptp_sts);
4228 }
4229 }
4230
4231 return ctlr->transfer(spi, message);
4232}
4233
4234/**
4235 * spi_async - asynchronous SPI transfer
4236 * @spi: device with which data will be exchanged
4237 * @message: describes the data transfers, including completion callback
4238 * Context: any (IRQs may be blocked, etc)
4239 *
4240 * This call may be used in_irq and other contexts which can't sleep,
4241 * as well as from task contexts which can sleep.
4242 *
4243 * The completion callback is invoked in a context which can't sleep.
4244 * Before that invocation, the value of message->status is undefined.
4245 * When the callback is issued, message->status holds either zero (to
4246 * indicate complete success) or a negative error code. After that
4247 * callback returns, the driver which issued the transfer request may
4248 * deallocate the associated memory; it's no longer in use by any SPI
4249 * core or controller driver code.
4250 *
4251 * Note that although all messages to a spi_device are handled in
4252 * FIFO order, messages may go to different devices in other orders.
4253 * Some device might be higher priority, or have various "hard" access
4254 * time requirements, for example.
4255 *
4256 * On detection of any fault during the transfer, processing of
4257 * the entire message is aborted, and the device is deselected.
4258 * Until returning from the associated message completion callback,
4259 * no other spi_message queued to that device will be processed.
4260 * (This rule applies equally to all the synchronous transfer calls,
4261 * which are wrappers around this core asynchronous primitive.)
4262 *
4263 * Return: zero on success, else a negative error code.
4264 */
4265int spi_async(struct spi_device *spi, struct spi_message *message)
4266{
4267 struct spi_controller *ctlr = spi->controller;
4268 int ret;
4269 unsigned long flags;
4270
4271 ret = __spi_validate(spi, message);
4272 if (ret != 0)
4273 return ret;
4274
4275 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4276
4277 if (ctlr->bus_lock_flag)
4278 ret = -EBUSY;
4279 else
4280 ret = __spi_async(spi, message);
4281
4282 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4283
4284 return ret;
4285}
4286EXPORT_SYMBOL_GPL(spi_async);
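
/*
 * Example: a sketch of asynchronous submission; foo_complete() and the
 * request structure embedding the message are hypothetical.
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_request *req = context;
 *
 *		// Runs in a context that can't sleep; req->msg.status
 *		// holds the final result here.
 *	}
 *
 *	spi_message_init_with_transfers(&req->msg, &req->xfer, 1);
 *	req->msg.complete = foo_complete;
 *	req->msg.context = req;
 *	ret = spi_async(spi, &req->msg);
 */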
4287
4288/**
4289 * spi_async_locked - version of spi_async with exclusive bus usage
4290 * @spi: device with which data will be exchanged
4291 * @message: describes the data transfers, including completion callback
4292 * Context: any (IRQs may be blocked, etc)
4293 *
4294 * This call may be used in_irq and other contexts which can't sleep,
4295 * as well as from task contexts which can sleep.
4296 *
4297 * The completion callback is invoked in a context which can't sleep.
4298 * Before that invocation, the value of message->status is undefined.
4299 * When the callback is issued, message->status holds either zero (to
4300 * indicate complete success) or a negative error code. After that
4301 * callback returns, the driver which issued the transfer request may
4302 * deallocate the associated memory; it's no longer in use by any SPI
4303 * core or controller driver code.
4304 *
4305 * Note that although all messages to a spi_device are handled in
4306 * FIFO order, messages may go to different devices in other orders.
4307 * Some device might be higher priority, or have various "hard" access
4308 * time requirements, for example.
4309 *
4310 * On detection of any fault during the transfer, processing of
4311 * the entire message is aborted, and the device is deselected.
4312 * Until returning from the associated message completion callback,
4313 * no other spi_message queued to that device will be processed.
4314 * (This rule applies equally to all the synchronous transfer calls,
4315 * which are wrappers around this core asynchronous primitive.)
4316 *
4317 * Return: zero on success, else a negative error code.
4318 */
4319static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
4320{
4321 struct spi_controller *ctlr = spi->controller;
4322 int ret;
4323 unsigned long flags;
4324
4325 ret = __spi_validate(spi, message);
4326 if (ret != 0)
4327 return ret;
4328
4329 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4330
4331 ret = __spi_async(spi, message);
4332
4333 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4334
4335 return ret;
}
4338
4339static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4340{
4341 bool was_busy;
4342 int ret;
4343
4344 mutex_lock(&ctlr->io_mutex);
4345
4346 was_busy = ctlr->busy;
4347
4348 ctlr->cur_msg = msg;
4349 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4350 if (ret)
4351 dev_err(&ctlr->dev, "noqueue transfer failed\n");
4352 ctlr->cur_msg = NULL;
4353 ctlr->fallback = false;
4354
4355 if (!was_busy) {
4356 kfree(ctlr->dummy_rx);
4357 ctlr->dummy_rx = NULL;
4358 kfree(ctlr->dummy_tx);
4359 ctlr->dummy_tx = NULL;
4360 if (ctlr->unprepare_transfer_hardware &&
4361 ctlr->unprepare_transfer_hardware(ctlr))
4362 dev_err(&ctlr->dev,
4363 "failed to unprepare transfer hardware\n");
4364 spi_idle_runtime_pm(ctlr);
4365 }
4366
4367 mutex_unlock(&ctlr->io_mutex);
4368}
4369
4370/*-------------------------------------------------------------------------*/
4371
4372/*
4373 * Utility methods for SPI protocol drivers, layered on
4374 * top of the core. Some other utility methods are defined as
4375 * inline functions.
4376 */
4377
4378static void spi_complete(void *arg)
4379{
4380 complete(arg);
4381}
4382
4383static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4384{
4385 DECLARE_COMPLETION_ONSTACK(done);
4386 int status;
4387 struct spi_controller *ctlr = spi->controller;
4388
4389 if (__spi_check_suspended(ctlr)) {
		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4391 return -ESHUTDOWN;
4392 }
4393
4394 status = __spi_validate(spi, message);
4395 if (status != 0)
4396 return status;
4397
4398 message->spi = spi;
4399
4400 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4401 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4402
4403 /*
4404 * Checking queue_empty here only guarantees async/sync message
4405 * ordering when coming from the same context. It does not need to
4406 * guard against reentrancy from a different context. The io_mutex
4407 * will catch those cases.
4408 */
4409 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4410 message->actual_length = 0;
4411 message->status = -EINPROGRESS;
4412
4413 trace_spi_message_submit(message);
4414
4415 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4416 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4417
4418 __spi_transfer_message_noqueue(ctlr, message);
4419
4420 return message->status;
4421 }
4422
	/*
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until it
	 * is completed.
	 */
4429 message->complete = spi_complete;
4430 message->context = &done;
4431 status = spi_async_locked(spi, message);
4432 if (status == 0) {
4433 wait_for_completion(&done);
4434 status = message->status;
4435 }
4436 message->context = NULL;
4437
4438 return status;
4439}
4440
4441/**
4442 * spi_sync - blocking/synchronous SPI data transfers
4443 * @spi: device with which data will be exchanged
4444 * @message: describes the data transfers
4445 * Context: can sleep
4446 *
4447 * This call may only be used from a context that may sleep. The sleep
4448 * is non-interruptible, and has no timeout. Low-overhead controller
4449 * drivers may DMA directly into and out of the message buffers.
4450 *
4451 * Note that the SPI device's chip select is active during the message,
4452 * and then is normally disabled between messages. Drivers for some
4453 * frequently-used devices may want to minimize costs of selecting a chip,
4454 * by leaving it selected in anticipation that the next message will go
4455 * to the same chip. (That may increase power usage.)
4456 *
4457 * Also, the caller is guaranteeing that the memory associated with the
4458 * message will not be freed before this call returns.
4459 *
4460 * Return: zero on success, else a negative error code.
4461 */
4462int spi_sync(struct spi_device *spi, struct spi_message *message)
4463{
4464 int ret;
4465
4466 mutex_lock(&spi->controller->bus_lock_mutex);
4467 ret = __spi_sync(spi, message);
4468 mutex_unlock(&spi->controller->bus_lock_mutex);
4469
4470 return ret;
4471}
4472EXPORT_SYMBOL_GPL(spi_sync);
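
/*
 * Example: a sketch of a synchronous command/response exchange built from
 * two transfers; cmd and resp are caller-provided buffers that must stay
 * valid until spi_sync() returns.
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd,  .len = sizeof(cmd)  },
 *		{ .rx_buf = resp, .len = sizeof(resp) },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	ret = spi_sync(spi, &msg);
 */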
4473
4474/**
4475 * spi_sync_locked - version of spi_sync with exclusive bus usage
4476 * @spi: device with which data will be exchanged
4477 * @message: describes the data transfers
4478 * Context: can sleep
4479 *
4480 * This call may only be used from a context that may sleep. The sleep
4481 * is non-interruptible, and has no timeout. Low-overhead controller
4482 * drivers may DMA directly into and out of the message buffers.
4483 *
4484 * This call should be used by drivers that require exclusive access to the
4485 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4486 * be released by a spi_bus_unlock call when the exclusive access is over.
4487 *
4488 * Return: zero on success, else a negative error code.
4489 */
4490int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4491{
4492 return __spi_sync(spi, message);
4493}
4494EXPORT_SYMBOL_GPL(spi_sync_locked);
4495
4496/**
4497 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4498 * @ctlr: SPI bus master that should be locked for exclusive bus access
4499 * Context: can sleep
4500 *
4501 * This call may only be used from a context that may sleep. The sleep
4502 * is non-interruptible, and has no timeout.
4503 *
4504 * This call should be used by drivers that require exclusive access to the
4505 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4506 * exclusive access is over. Data transfer must be done by spi_sync_locked
4507 * and spi_async_locked calls when the SPI bus lock is held.
4508 *
4509 * Return: always zero.
4510 */
4511int spi_bus_lock(struct spi_controller *ctlr)
4512{
4513 unsigned long flags;
4514
4515 mutex_lock(&ctlr->bus_lock_mutex);
4516
4517 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4518 ctlr->bus_lock_flag = 1;
4519 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4520
4521 /* Mutex remains locked until spi_bus_unlock() is called */
4522
4523 return 0;
4524}
4525EXPORT_SYMBOL_GPL(spi_bus_lock);
4526
4527/**
4528 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4529 * @ctlr: SPI bus master that was locked for exclusive bus access
4530 * Context: can sleep
4531 *
4532 * This call may only be used from a context that may sleep. The sleep
4533 * is non-interruptible, and has no timeout.
4534 *
4535 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4536 * call.
4537 *
4538 * Return: always zero.
4539 */
4540int spi_bus_unlock(struct spi_controller *ctlr)
4541{
4542 ctlr->bus_lock_flag = 0;
4543
4544 mutex_unlock(&ctlr->bus_lock_mutex);
4545
4546 return 0;
4547}
4548EXPORT_SYMBOL_GPL(spi_bus_unlock);
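
/*
 * Example: a sketch of the locking pattern for issuing several messages
 * as one atomic sequence on a shared bus.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */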
4549
4550/* Portable code must never pass more than 32 bytes */
4551#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
4552
4553static u8 *buf;
4554
4555/**
4556 * spi_write_then_read - SPI synchronous write followed by read
4557 * @spi: device with which data will be exchanged
4558 * @txbuf: data to be written (need not be DMA-safe)
4559 * @n_tx: size of txbuf, in bytes
4560 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4561 * @n_rx: size of rxbuf, in bytes
4562 * Context: can sleep
4563 *
4564 * This performs a half duplex MicroWire style transaction with the
4565 * device, sending txbuf and then reading rxbuf. The return value
4566 * is zero for success, else a negative errno status code.
4567 * This call may only be used from a context that may sleep.
4568 *
4569 * Parameters to this routine are always copied using a small buffer.
4570 * Performance-sensitive or bulk transfer code should instead use
4571 * spi_{async,sync}() calls with DMA-safe buffers.
4572 *
4573 * Return: zero on success, else a negative error code.
4574 */
4575int spi_write_then_read(struct spi_device *spi,
4576 const void *txbuf, unsigned n_tx,
4577 void *rxbuf, unsigned n_rx)
4578{
4579 static DEFINE_MUTEX(lock);
4580
4581 int status;
4582 struct spi_message message;
4583 struct spi_transfer x[2];
4584 u8 *local_buf;
4585
	/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it is a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the preallocated buffer or the transfer is too large.
	 */
4592 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4593 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4594 GFP_KERNEL | GFP_DMA);
4595 if (!local_buf)
4596 return -ENOMEM;
4597 } else {
4598 local_buf = buf;
4599 }
4600
4601 spi_message_init(&message);
4602 memset(x, 0, sizeof(x));
4603 if (n_tx) {
4604 x[0].len = n_tx;
4605 spi_message_add_tail(&x[0], &message);
4606 }
4607 if (n_rx) {
4608 x[1].len = n_rx;
4609 spi_message_add_tail(&x[1], &message);
4610 }
4611
4612 memcpy(local_buf, txbuf, n_tx);
4613 x[0].tx_buf = local_buf;
4614 x[1].rx_buf = local_buf + n_tx;
4615
4616 /* Do the I/O */
4617 status = spi_sync(spi, &message);
4618 if (status == 0)
4619 memcpy(rxbuf, x[1].rx_buf, n_rx);
4620
4621 if (x[0].tx_buf == buf)
4622 mutex_unlock(&lock);
4623 else
4624 kfree(local_buf);
4625
4626 return status;
4627}
4628EXPORT_SYMBOL_GPL(spi_write_then_read);
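
/*
 * Example: a sketch of reading one register; the command byte layout is
 * device-specific and illustrative only.
 *
 *	u8 cmd = 0x80 | reg;
 *	u8 val;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *
 * For this common one-byte case the spi_w8r8() helper wraps the same call.
 */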
4629
4630/*-------------------------------------------------------------------------*/
4631
4632#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with the returned spi_device */
4634static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4635{
4636 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4637
4638 return dev ? to_spi_device(dev) : NULL;
4639}
4640
/* SPI controllers are not on the spi_bus, so we find them another way */
4642static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4643{
4644 struct device *dev;
4645
4646 dev = class_find_device_by_of_node(&spi_master_class, node);
4647 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4648 dev = class_find_device_by_of_node(&spi_slave_class, node);
4649 if (!dev)
4650 return NULL;
4651
	/* Reference obtained in class_find_device() */
4653 return container_of(dev, struct spi_controller, dev);
4654}
4655
4656static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4657 void *arg)
4658{
4659 struct of_reconfig_data *rd = arg;
4660 struct spi_controller *ctlr;
4661 struct spi_device *spi;
4662
4663 switch (of_reconfig_get_state_change(action, arg)) {
4664 case OF_RECONFIG_CHANGE_ADD:
4665 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4666 if (ctlr == NULL)
4667 return NOTIFY_OK; /* Not for us */
4668
4669 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4670 put_device(&ctlr->dev);
4671 return NOTIFY_OK;
4672 }
4673
4674 /*
4675 * Clear the flag before adding the device so that fw_devlink
4676 * doesn't skip adding consumers to this device.
4677 */
4678 rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4679 spi = of_register_spi_device(ctlr, rd->dn);
4680 put_device(&ctlr->dev);
4681
4682 if (IS_ERR(spi)) {
4683 pr_err("%s: failed to create for '%pOF'\n",
4684 __func__, rd->dn);
4685 of_node_clear_flag(rd->dn, OF_POPULATED);
4686 return notifier_from_errno(PTR_ERR(spi));
4687 }
4688 break;
4689
4690 case OF_RECONFIG_CHANGE_REMOVE:
4691 /* Already depopulated? */
4692 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4693 return NOTIFY_OK;
4694
4695 /* Find our device by node */
4696 spi = of_find_spi_device_by_node(rd->dn);
4697 if (spi == NULL)
4698 return NOTIFY_OK; /* No? not meant for us */
4699
4700 /* Unregister takes one ref away */
4701 spi_unregister_device(spi);
4702
		/* And drop the reference taken by the find */
4704 put_device(&spi->dev);
4705 break;
4706 }
4707
4708 return NOTIFY_OK;
4709}
4710
4711static struct notifier_block spi_of_notifier = {
4712 .notifier_call = of_spi_notify,
4713};
4714#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4715extern struct notifier_block spi_of_notifier;
4716#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4717
4718#if IS_ENABLED(CONFIG_ACPI)
4719static int spi_acpi_controller_match(struct device *dev, const void *data)
4720{
4721 return ACPI_COMPANION(dev->parent) == data;
4722}
4723
4724struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4725{
4726 struct device *dev;
4727
4728 dev = class_find_device(&spi_master_class, NULL, adev,
4729 spi_acpi_controller_match);
4730 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4731 dev = class_find_device(&spi_slave_class, NULL, adev,
4732 spi_acpi_controller_match);
4733 if (!dev)
4734 return NULL;
4735
4736 return container_of(dev, struct spi_controller, dev);
4737}
4738EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
4739
4740static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4741{
4742 struct device *dev;
4743
4744 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4745 return to_spi_device(dev);
4746}
4747
4748static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4749 void *arg)
4750{
4751 struct acpi_device *adev = arg;
4752 struct spi_controller *ctlr;
4753 struct spi_device *spi;
4754
4755 switch (value) {
4756 case ACPI_RECONFIG_DEVICE_ADD:
4757 ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4758 if (!ctlr)
4759 break;
4760
4761 acpi_register_spi_device(ctlr, adev);
4762 put_device(&ctlr->dev);
4763 break;
4764 case ACPI_RECONFIG_DEVICE_REMOVE:
4765 if (!acpi_device_enumerated(adev))
4766 break;
4767
4768 spi = acpi_spi_find_device_by_adev(adev);
4769 if (!spi)
4770 break;
4771
4772 spi_unregister_device(spi);
4773 put_device(&spi->dev);
4774 break;
4775 }
4776
4777 return NOTIFY_OK;
4778}
4779
4780static struct notifier_block spi_acpi_notifier = {
4781 .notifier_call = acpi_spi_notify,
4782};
4783#else
4784extern struct notifier_block spi_acpi_notifier;
4785#endif
4786
4787static int __init spi_init(void)
4788{
4789 int status;
4790
4791 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4792 if (!buf) {
4793 status = -ENOMEM;
4794 goto err0;
4795 }
4796
4797 status = bus_register(&spi_bus_type);
4798 if (status < 0)
4799 goto err1;
4800
4801 status = class_register(&spi_master_class);
4802 if (status < 0)
4803 goto err2;
4804
4805 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4806 status = class_register(&spi_slave_class);
4807 if (status < 0)
4808 goto err3;
4809 }
4810
4811 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4812 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4813 if (IS_ENABLED(CONFIG_ACPI))
4814 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4815
4816 return 0;
4817
4818err3:
4819 class_unregister(&spi_master_class);
4820err2:
4821 bus_unregister(&spi_bus_type);
4822err1:
4823 kfree(buf);
4824 buf = NULL;
4825err0:
4826 return status;
4827}
4828
4829/*
4830 * A board_info is normally registered in arch_initcall(),
4831 * but even essential drivers wait till later.
4832 *
4833 * REVISIT only boardinfo really needs static linking. The rest (device and
4834 * driver registration) _could_ be dynamically linked (modular) ... Costs
4835 * include needing to have boardinfo data structures be much more public.
4836 */
4837postcore_initcall(spi_init);