/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi controllers may cleanup for released devices */
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);

	spi_controller_put(spi->controller);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
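
/*
 * Bucket-selection sketch: fls() returns the index of the highest set bit,
 * so a transfer of e.g. 100 bytes (fls(100) == 7) lands in histogram
 * bucket 7 - 1 == 6, i.e. the "64-127" range above, while a zero-length
 * transfer (fls(0) == 0) is clamped into bucket 0 ("0-1").
 */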

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
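
/*
 * For example, a device whose modalias is "ads7846" reports
 * MODALIAS=spi:ads7846 (SPI_MODULE_PREFIX is "spi:"), so userspace can run
 * "modprobe spi:ads7846" and load a driver that declared a matching
 * spi_device_id table via MODULE_DEVICE_TABLE(spi, ...).
 */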

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
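
/*
 * Minimal registration sketch; the "foo" names are illustrative only:
 *
 *	static int foo_probe(struct spi_device *spi) { ... }
 *	static int foo_remove(struct spi_device *spi) { ... }
 *
 *	static struct spi_driver foo_driver = {
 *		.driver	= { .name = "foo" },
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 *
 * module_spi_driver() expands to spi_register_driver()/spi_unregister_driver()
 * calls in module init/exit, which in turn reach __spi_register_driver().
 */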

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into board-specific files like
 * arch/.../mach.../board-YYY.c, with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to protect
 * objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately, so that it can directly fill the
 * spi_device with device parameters before calling spi_add_device()
 * on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, it should call
 * spi_dev_put() on it instead.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..num_chipselect-1; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration. Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		goto done;
	}

	if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being set up. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
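
/*
 * Typical allocate-then-add flow, e.g. from a bridge or controller driver
 * (a sketch; the chip_select, speed and modalias values are illustrative,
 * and error handling is abbreviated):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "spidev", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard without adding
 *		return -ENODEV;
 *	}
 */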

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/* NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
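
/*
 * Out-of-band instantiation sketch (the values are illustrative):
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "spidev",
 *		.max_speed_hz	= 12000000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *spi = spi_new_device(ctlr, &chip);
 *	if (!spi)
 *		... // failure was already logged above
 */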

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
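
/*
 * Typical board-file usage (a sketch; the bus_num, modalias and speed
 * values are illustrative):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "m25p80",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */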

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_set_value(spi->cs_gpio, !enable);
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}
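
/*
 * Segment-count sketch: for a vmalloc'ed or kmap'ed buffer the table is
 * sized per page, so (assuming the device's DMA max segment size is at
 * least PAGE_SIZE) a len of 10000 starting at offset 256 into a 4 KiB
 * page needs DIV_ROUND_UP(10000 + 256, 4096) = 3 scatterlist entries.
 */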

static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
			      struct sg_table *sgt, void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_controller *ctlr,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 200; /* some tolerance */

				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&ctlr->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
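
/*
 * Timeout sketch for the wait above: a 4096-byte transfer clocked at
 * 1 MHz gives ms = 8 * 1000 * 4096 / 1000000 = 32 (do_div truncates),
 * which is then doubled and padded with 200 ms of tolerance, i.e. a
 * 264 ms completion timeout.
 */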

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
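
/*
 * A transfer_one() implementation that starts an interrupt driven transfer
 * typically returns 1 ("in progress") and later calls this from its
 * completion interrupt, e.g. (a sketch; the "foo" names are illustrative):
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		... drain FIFOs and ack the interrupt ...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */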

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	ctlr->cur_msg =
		list_first_entry(&ctlr->queue, struct spi_message, queue);

	list_del_init(&ctlr->cur_msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware\n");

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(ctlr->cur_msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			ctlr->cur_msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, ctlr->cur_msg);
	if (ret) {
		ctlr->cur_msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	ctlr->running = false;
	ctlr->busy = false;

	kthread_init_worker(&ctlr->kworker);
	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
					 "%s", dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker_task)) {
		dev_err(&ctlr->dev, "failed to create message pump task\n");
		return PTR_ERR(ctlr->kworker_task);
	}
	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt) {
		dev_info(&ctlr->dev,
			 "will run message pump with realtime priority\n");
		sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	spi_unmap_msg(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret) {
		dev_warn(&ctlr->dev, "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&ctlr->kworker);
	kthread_stop(ctlr->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled and queued onto the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (strcmp(nc->name, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&ctlr->dev,
			"%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
		return rc;
	}
	spi->max_speed_hz = value;

	return 0;
}
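
/*
 * A matching devicetree fragment might look like the following (an
 * illustrative sketch only, not taken from any particular board):
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;			// chip select
 *			spi-max-frequency = <20000000>;
 *			spi-cpha;
 *		};
 *	};
 */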

static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr: Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif

#ifdef CONFIG_ACPI
static void acpi_spi_parse_apple_properties(struct spi_device *spi)
{
	struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		spi->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		spi->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		spi->mode |= SPI_CPHA;
}

static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_controller *ctlr = spi->controller;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	acpi_spi_parse_apple_properties(spi);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_controller *ctlr = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */

static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};

#ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);

static int match_true(struct device *dev, void *data)
{
	return 1;
}

static ssize_t spi_slave_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	return sprintf(buf, "%s\n",
		       child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t spi_slave_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
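
/*
 * From userspace, a slave protocol handler is bound or unbound through
 * this attribute, e.g. (a sketch; "spi0" depends on the actual bus name):
 *
 *	# echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	# echo "(null)"       > /sys/class/spi_slave/spi0/slave
 */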
1949
1950static struct attribute *spi_slave_attrs[] = {
1951 &dev_attr_slave.attr,
1952 NULL,
1953};
1954
1955static const struct attribute_group spi_slave_group = {
1956 .attrs = spi_slave_attrs,
1957};
1958
1959static const struct attribute_group *spi_slave_groups[] = {
1960 &spi_controller_statistics_group,
1961 &spi_slave_group,
1962 NULL,
1963};
1964
1965static struct class spi_slave_class = {
1966 .name = "spi_slave",
1967 .owner = THIS_MODULE,
1968 .dev_release = spi_controller_release,
1969 .dev_groups = spi_slave_groups,
1970};
1971#else
1972extern struct class spi_slave_class; /* dummy */
1973#endif
1974
1975/**
1976 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the parent device of the controller, often on the platform_bus
1978 * @size: how much zeroed driver-private data to allocate; the pointer to this
1979 * memory is in the driver_data field of the returned device,
1980 * accessible with spi_controller_get_devdata().
1981 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
1982 * slave (true) controller
1983 * Context: can sleep
1984 *
1985 * This call is used only by SPI controller drivers, which are the
1986 * only ones directly touching chip registers. It's how they allocate
1987 * an spi_controller structure, prior to calling spi_register_controller().
1988 *
1989 * This must be called from context that can sleep.
1990 *
1991 * The caller is responsible for assigning the bus number and initializing the
1992 * controller's methods before calling spi_register_controller(); and (after
1993 * errors adding the device) calling spi_controller_put() to prevent a memory
1994 * leak.
1995 *
1996 * Return: the SPI controller structure on success, else NULL.
1997 */
1998struct spi_controller *__spi_alloc_controller(struct device *dev,
1999 unsigned int size, bool slave)
2000{
2001 struct spi_controller *ctlr;
2002
2003 if (!dev)
2004 return NULL;
2005
2006 ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
2007 if (!ctlr)
2008 return NULL;
2009
2010 device_initialize(&ctlr->dev);
2011 ctlr->bus_num = -1;
2012 ctlr->num_chipselect = 1;
2013 ctlr->slave = slave;
2014 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2015 ctlr->dev.class = &spi_slave_class;
2016 else
2017 ctlr->dev.class = &spi_master_class;
2018 ctlr->dev.parent = dev;
2019 pm_suspend_ignore_children(&ctlr->dev, true);
2020 spi_controller_set_devdata(ctlr, &ctlr[1]);
2021
2022 return ctlr;
2023}
2024EXPORT_SYMBOL_GPL(__spi_alloc_controller);
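/*
 * Example (illustrative sketch only): a typical controller driver
 * allocates the controller together with its private state and later
 * retrieves that state with spi_controller_get_devdata(); "foo_priv"
 * and "foo_probe" are hypothetical names.
 *
 *	struct foo_priv {
 *		void __iomem *regs;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		struct foo_priv *priv;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *		priv = spi_controller_get_devdata(ctlr);
 *		...
 *	}
 */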
2025
2026#ifdef CONFIG_OF
2027static int of_spi_register_master(struct spi_controller *ctlr)
2028{
2029 int nb, i, *cs;
2030 struct device_node *np = ctlr->dev.of_node;
2031
2032 if (!np)
2033 return 0;
2034
2035 nb = of_gpio_named_count(np, "cs-gpios");
2036 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2037
2038 /* Return error only for an incorrectly formed cs-gpios property */
2039 if (nb == 0 || nb == -ENOENT)
2040 return 0;
2041 else if (nb < 0)
2042 return nb;
2043
        cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
                          GFP_KERNEL);
2046 ctlr->cs_gpios = cs;
2047
2048 if (!ctlr->cs_gpios)
2049 return -ENOMEM;
2050
2051 for (i = 0; i < ctlr->num_chipselect; i++)
2052 cs[i] = -ENOENT;
2053
2054 for (i = 0; i < nb; i++)
2055 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2056
2057 return 0;
2058}
2059#else
2060static int of_spi_register_master(struct spi_controller *ctlr)
2061{
2062 return 0;
2063}
2064#endif
2065
2066/**
2067 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *        spi_alloc_slave()
2070 * Context: can sleep
2071 *
2072 * SPI controllers connect to their drivers using some non-SPI bus,
2073 * such as the platform bus. The final stage of probe() in that code
2074 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2075 *
2076 * SPI controllers use board specific (often SOC specific) bus numbers,
2077 * and board-specific addressing for SPI devices combines those numbers
2078 * with chip select numbers. Since SPI does not directly support dynamic
2079 * device identification, boards need configuration tables telling which
2080 * chip is at which address.
2081 *
 * This must be called from context that can sleep. It returns zero on
 * success, else a negative error code (in which case the caller still
 * holds the reference from allocation and should release it with
 * spi_controller_put()). After a successful return, the caller is
 * responsible for calling spi_unregister_controller().
2086 *
2087 * Return: zero on success, else a negative error code.
2088 */
2089int spi_register_controller(struct spi_controller *ctlr)
2090{
2091 struct device *dev = ctlr->dev.parent;
2092 struct boardinfo *bi;
2093 int status = -ENODEV;
2094 int id, first_dynamic;
2095
2096 if (!dev)
2097 return -ENODEV;
2098
2099 if (!spi_controller_is_slave(ctlr)) {
2100 status = of_spi_register_master(ctlr);
2101 if (status)
2102 return status;
2103 }
2104
2105 /* even if it's just one always-selected device, there must
2106 * be at least one chipselect
2107 */
2108 if (ctlr->num_chipselect == 0)
2109 return -EINVAL;
2110 /* allocate dynamic bus number using Linux idr */
2111 if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
2112 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2113 if (id >= 0) {
2114 ctlr->bus_num = id;
2115 mutex_lock(&board_lock);
2116 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2117 ctlr->bus_num + 1, GFP_KERNEL);
2118 mutex_unlock(&board_lock);
2119 if (WARN(id < 0, "couldn't get idr"))
2120 return id == -ENOSPC ? -EBUSY : id;
2121 }
2122 }
2123 if (ctlr->bus_num < 0) {
2124 first_dynamic = of_alias_get_highest_id("spi");
2125 if (first_dynamic < 0)
2126 first_dynamic = 0;
2127 else
2128 first_dynamic++;
2129
2130 mutex_lock(&board_lock);
2131 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2132 0, GFP_KERNEL);
2133 mutex_unlock(&board_lock);
2134 if (WARN(id < 0, "couldn't get idr"))
2135 return id;
2136 ctlr->bus_num = id;
2137 }
2138 INIT_LIST_HEAD(&ctlr->queue);
2139 spin_lock_init(&ctlr->queue_lock);
2140 spin_lock_init(&ctlr->bus_lock_spinlock);
2141 mutex_init(&ctlr->bus_lock_mutex);
2142 mutex_init(&ctlr->io_mutex);
2143 ctlr->bus_lock_flag = 0;
2144 init_completion(&ctlr->xfer_completion);
2145 if (!ctlr->max_dma_len)
2146 ctlr->max_dma_len = INT_MAX;
2147
2148 /* register the device, then userspace will see it.
2149 * registration fails if the bus ID is in use.
2150 */
2151 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2152 status = device_add(&ctlr->dev);
2153 if (status < 0) {
2154 /* free bus id */
2155 mutex_lock(&board_lock);
2156 idr_remove(&spi_master_idr, ctlr->bus_num);
2157 mutex_unlock(&board_lock);
2158 goto done;
2159 }
2160 dev_dbg(dev, "registered %s %s\n",
2161 spi_controller_is_slave(ctlr) ? "slave" : "master",
2162 dev_name(&ctlr->dev));
2163
2164 /* If we're using a queued driver, start the queue */
2165 if (ctlr->transfer)
2166 dev_info(dev, "controller is unqueued, this is deprecated\n");
2167 else {
2168 status = spi_controller_initialize_queue(ctlr);
2169 if (status) {
2170 device_del(&ctlr->dev);
2171 /* free bus id */
2172 mutex_lock(&board_lock);
2173 idr_remove(&spi_master_idr, ctlr->bus_num);
2174 mutex_unlock(&board_lock);
2175 goto done;
2176 }
2177 }
2178 /* add statistics */
2179 spin_lock_init(&ctlr->statistics.lock);
2180
2181 mutex_lock(&board_lock);
2182 list_add_tail(&ctlr->list, &spi_controller_list);
2183 list_for_each_entry(bi, &board_list, list)
2184 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2185 mutex_unlock(&board_lock);
2186
2187 /* Register devices from the device tree and ACPI */
2188 of_register_spi_devices(ctlr);
2189 acpi_register_spi_devices(ctlr);
2190done:
2191 return status;
2192}
2193EXPORT_SYMBOL_GPL(spi_register_controller);
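/*
 * Example (illustrative sketch only): the tail of a hypothetical probe()
 * routine; on registration failure the reference obtained from
 * spi_alloc_master() must be dropped with spi_controller_put().
 *
 *	ctlr->bus_num = -1;			// request a dynamic bus number
 *	ctlr->num_chipselect = 4;
 *	ctlr->transfer_one = foo_transfer_one;	// hypothetical callback
 *	status = spi_register_controller(ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);
 *	return status;
 */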
2194
2195static void devm_spi_unregister(struct device *dev, void *res)
2196{
2197 spi_unregister_controller(*(struct spi_controller **)res);
2198}
2199
2200/**
2201 * devm_spi_register_controller - register managed SPI master or slave
2202 * controller
2203 * @dev: device managing SPI controller
2204 * @ctlr: initialized controller, originally from spi_alloc_master() or
2205 * spi_alloc_slave()
2206 * Context: can sleep
2207 *
 * Register an SPI controller as with spi_register_controller(); the
 * controller will automatically be unregistered and freed when @dev
 * goes away.
2210 *
2211 * Return: zero on success, else a negative error code.
2212 */
2213int devm_spi_register_controller(struct device *dev,
2214 struct spi_controller *ctlr)
2215{
2216 struct spi_controller **ptr;
2217 int ret;
2218
2219 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2220 if (!ptr)
2221 return -ENOMEM;
2222
2223 ret = spi_register_controller(ctlr);
2224 if (!ret) {
2225 *ptr = ctlr;
2226 devres_add(dev, ptr);
2227 } else {
2228 devres_free(ptr);
2229 }
2230
2231 return ret;
2232}
2233EXPORT_SYMBOL_GPL(devm_spi_register_controller);
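/*
 * Example (illustrative sketch only): with the managed variant the
 * unregistration happens automatically when the managing device is
 * unbound, but a failed registration still leaves the allocation
 * reference with the caller:
 *
 *	status = devm_spi_register_controller(&pdev->dev, ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);
 *	return status;
 */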
2234
2235static int __unregister(struct device *dev, void *null)
2236{
2237 spi_unregister_device(to_spi_device(dev));
2238 return 0;
2239}
2240
2241/**
2242 * spi_unregister_controller - unregister SPI master or slave controller
2243 * @ctlr: the controller being unregistered
2244 * Context: can sleep
2245 *
2246 * This call is used only by SPI controller drivers, which are the
2247 * only ones directly touching chip registers.
2248 *
2249 * This must be called from context that can sleep.
2250 *
2251 * Note that this function also drops a reference to the controller.
2252 */
2253void spi_unregister_controller(struct spi_controller *ctlr)
2254{
2255 struct spi_controller *found;
2256 int id = ctlr->bus_num;
2257 int dummy;
2258
2259 /* First make sure that this controller was ever added */
2260 mutex_lock(&board_lock);
2261 found = idr_find(&spi_master_idr, id);
2262 mutex_unlock(&board_lock);
2263 if (ctlr->queued) {
2264 if (spi_destroy_queue(ctlr))
2265 dev_err(&ctlr->dev, "queue remove failed\n");
2266 }
2267 mutex_lock(&board_lock);
2268 list_del(&ctlr->list);
2269 mutex_unlock(&board_lock);
2270
2271 dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
2272 device_unregister(&ctlr->dev);
2273 /* free bus id */
2274 mutex_lock(&board_lock);
2275 if (found == ctlr)
2276 idr_remove(&spi_master_idr, id);
2277 mutex_unlock(&board_lock);
2278}
2279EXPORT_SYMBOL_GPL(spi_unregister_controller);
2280
2281int spi_controller_suspend(struct spi_controller *ctlr)
2282{
2283 int ret;
2284
2285 /* Basically no-ops for non-queued controllers */
2286 if (!ctlr->queued)
2287 return 0;
2288
2289 ret = spi_stop_queue(ctlr);
2290 if (ret)
2291 dev_err(&ctlr->dev, "queue stop failed\n");
2292
2293 return ret;
2294}
2295EXPORT_SYMBOL_GPL(spi_controller_suspend);
2296
2297int spi_controller_resume(struct spi_controller *ctlr)
2298{
2299 int ret;
2300
2301 if (!ctlr->queued)
2302 return 0;
2303
2304 ret = spi_start_queue(ctlr);
2305 if (ret)
2306 dev_err(&ctlr->dev, "queue restart failed\n");
2307
2308 return ret;
2309}
2310EXPORT_SYMBOL_GPL(spi_controller_resume);
2311
2312static int __spi_controller_match(struct device *dev, const void *data)
2313{
2314 struct spi_controller *ctlr;
2315 const u16 *bus_num = data;
2316
2317 ctlr = container_of(dev, struct spi_controller, dev);
2318 return ctlr->bus_num == *bus_num;
2319}
2320
2321/**
2322 * spi_busnum_to_master - look up master associated with bus_num
2323 * @bus_num: the master's bus number
2324 * Context: can sleep
2325 *
2326 * This call may be used with devices that are registered after
2327 * arch init time. It returns a refcounted pointer to the relevant
2328 * spi_controller (which the caller must release), or NULL if there is
2329 * no such master registered.
2330 *
2331 * Return: the SPI master structure on success, else NULL.
2332 */
2333struct spi_controller *spi_busnum_to_master(u16 bus_num)
2334{
2335 struct device *dev;
2336 struct spi_controller *ctlr = NULL;
2337
2338 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2339 __spi_controller_match);
2340 if (dev)
2341 ctlr = container_of(dev, struct spi_controller, dev);
2342 /* reference got in class_find_device */
2343 return ctlr;
2344}
2345EXPORT_SYMBOL_GPL(spi_busnum_to_master);
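/*
 * Example (illustrative sketch only): look up bus 0 and drop the
 * reference when done.
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		...
 *		spi_controller_put(ctlr);
 *	}
 */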
2346
2347/*-------------------------------------------------------------------------*/
2348
2349/* Core methods for SPI resource management */
2350
2351/**
2352 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2353 * during the processing of a spi_message while using
2354 * spi_transfer_one
2355 * @spi: the spi device for which we allocate memory
2356 * @release: the release code to execute for this resource
2357 * @size: size to alloc and return
2358 * @gfp: GFP allocation flags
2359 *
2360 * Return: the pointer to the allocated data
2361 *
2362 * This may get enhanced in the future to allocate from a memory pool
2363 * of the @spi_device or @spi_controller to avoid repeated allocations.
2364 */
2365void *spi_res_alloc(struct spi_device *spi,
2366 spi_res_release_t release,
2367 size_t size, gfp_t gfp)
2368{
2369 struct spi_res *sres;
2370
2371 sres = kzalloc(sizeof(*sres) + size, gfp);
2372 if (!sres)
2373 return NULL;
2374
2375 INIT_LIST_HEAD(&sres->entry);
2376 sres->release = release;
2377
2378 return sres->data;
2379}
2380EXPORT_SYMBOL_GPL(spi_res_alloc);
2381
2382/**
2383 * spi_res_free - free an spi resource
2384 * @res: pointer to the custom data of a resource
 */
2387void spi_res_free(void *res)
2388{
2389 struct spi_res *sres = container_of(res, struct spi_res, data);
2390
2391 if (!res)
2392 return;
2393
2394 WARN_ON(!list_empty(&sres->entry));
2395 kfree(sres);
2396}
2397EXPORT_SYMBOL_GPL(spi_res_free);
2398
2399/**
2400 * spi_res_add - add a spi_res to the spi_message
2401 * @message: the spi message
2402 * @res: the spi_resource
2403 */
2404void spi_res_add(struct spi_message *message, void *res)
2405{
2406 struct spi_res *sres = container_of(res, struct spi_res, data);
2407
2408 WARN_ON(!list_empty(&sres->entry));
2409 list_add_tail(&sres->entry, &message->resources);
2410}
2411EXPORT_SYMBOL_GPL(spi_res_add);
2412
2413/**
2414 * spi_res_release - release all spi resources for this message
2415 * @ctlr: the @spi_controller
2416 * @message: the @spi_message
2417 */
2418void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2419{
2420 struct spi_res *res;
2421
2422 while (!list_empty(&message->resources)) {
2423 res = list_last_entry(&message->resources,
2424 struct spi_res, entry);
2425
2426 if (res->release)
2427 res->release(ctlr, message, res->data);
2428
2429 list_del(&res->entry);
2430
2431 kfree(res);
2432 }
2433}
2434EXPORT_SYMBOL_GPL(spi_res_release);
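/*
 * Example (illustrative sketch only): allocating a resource whose release
 * callback runs when the message finishes; "foo_res_release" is a
 * hypothetical callback matching spi_res_release_t.
 *
 *	static void foo_res_release(struct spi_controller *ctlr,
 *				    struct spi_message *msg, void *res)
 *	{
 *		// undo whatever was set up for this message
 *	}
 *
 *	u8 *scratch = spi_res_alloc(spi, foo_res_release, 16, GFP_KERNEL);
 *
 *	if (!scratch)
 *		return -ENOMEM;
 *	spi_res_add(msg, scratch);	// freed via spi_res_release()
 */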
2435
2436/*-------------------------------------------------------------------------*/
2437
2438/* Core methods for spi_message alterations */
2439
2440static void __spi_replace_transfers_release(struct spi_controller *ctlr,
2441 struct spi_message *msg,
2442 void *res)
2443{
2444 struct spi_replaced_transfers *rxfer = res;
2445 size_t i;
2446
2447 /* call extra callback if requested */
2448 if (rxfer->release)
2449 rxfer->release(ctlr, msg, res);
2450
2451 /* insert replaced transfers back into the message */
2452 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2453
2454 /* remove the formerly inserted entries */
2455 for (i = 0; i < rxfer->inserted; i++)
2456 list_del(&rxfer->inserted_transfers[i].transfer_list);
2457}
2458
2459/**
2460 * spi_replace_transfers - replace transfers with several transfers
2461 * and register change with spi_message.resources
2462 * @msg: the spi_message we work upon
2463 * @xfer_first: the first spi_transfer we want to replace
2464 * @remove: number of transfers to remove
2465 * @insert: the number of transfers we want to insert instead
2466 * @release: extra release code necessary in some circumstances
2467 * @extradatasize: extra data to allocate (with alignment guarantees
2468 * of struct @spi_transfer)
2469 * @gfp: gfp flags
2470 *
 * Return: pointer to @spi_replaced_transfers,
 *         PTR_ERR(...) in case of errors.
2473 */
2474struct spi_replaced_transfers *spi_replace_transfers(
2475 struct spi_message *msg,
2476 struct spi_transfer *xfer_first,
2477 size_t remove,
2478 size_t insert,
2479 spi_replaced_release_t release,
2480 size_t extradatasize,
2481 gfp_t gfp)
2482{
2483 struct spi_replaced_transfers *rxfer;
2484 struct spi_transfer *xfer;
2485 size_t i;
2486
2487 /* allocate the structure using spi_res */
2488 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2489 insert * sizeof(struct spi_transfer)
2490 + sizeof(struct spi_replaced_transfers)
2491 + extradatasize,
2492 gfp);
2493 if (!rxfer)
2494 return ERR_PTR(-ENOMEM);
2495
2496 /* the release code to invoke before running the generic release */
2497 rxfer->release = release;
2498
2499 /* assign extradata */
2500 if (extradatasize)
2501 rxfer->extradata =
2502 &rxfer->inserted_transfers[insert];
2503
2504 /* init the replaced_transfers list */
2505 INIT_LIST_HEAD(&rxfer->replaced_transfers);
2506
        /* assign the list_entry after which we should reinsert
         * the @replaced_transfers - it may be spi_message.transfers!
         */
2510 rxfer->replaced_after = xfer_first->transfer_list.prev;
2511
2512 /* remove the requested number of transfers */
2513 for (i = 0; i < remove; i++) {
                /* if the entry after replaced_after is msg->transfers
                 * then we have been requested to remove more transfers
                 * than are in the list
                 */
2518 if (rxfer->replaced_after->next == &msg->transfers) {
2519 dev_err(&msg->spi->dev,
2520 "requested to remove more spi_transfers than are available\n");
2521 /* insert replaced transfers back into the message */
2522 list_splice(&rxfer->replaced_transfers,
2523 rxfer->replaced_after);
2524
2525 /* free the spi_replace_transfer structure */
2526 spi_res_free(rxfer);
2527
2528 /* and return with an error */
2529 return ERR_PTR(-EINVAL);
2530 }
2531
2532 /* remove the entry after replaced_after from list of
2533 * transfers and add it to list of replaced_transfers
2534 */
2535 list_move_tail(rxfer->replaced_after->next,
2536 &rxfer->replaced_transfers);
2537 }
2538
        /* create copies of the given xfer with identical settings,
         * based on the first transfer to get removed
         */
2542 for (i = 0; i < insert; i++) {
2543 /* we need to run in reverse order */
2544 xfer = &rxfer->inserted_transfers[insert - 1 - i];
2545
2546 /* copy all spi_transfer data */
2547 memcpy(xfer, xfer_first, sizeof(*xfer));
2548
2549 /* add to list */
2550 list_add(&xfer->transfer_list, rxfer->replaced_after);
2551
2552 /* clear cs_change and delay_usecs for all but the last */
2553 if (i) {
2554 xfer->cs_change = false;
2555 xfer->delay_usecs = 0;
2556 }
2557 }
2558
2559 /* set up inserted */
2560 rxfer->inserted = insert;
2561
2562 /* and register it with spi_res/spi_message */
2563 spi_res_add(msg, rxfer);
2564
2565 return rxfer;
2566}
2567EXPORT_SYMBOL_GPL(spi_replace_transfers);
2568
2569static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
2570 struct spi_message *msg,
2571 struct spi_transfer **xferp,
2572 size_t maxsize,
2573 gfp_t gfp)
2574{
2575 struct spi_transfer *xfer = *xferp, *xfers;
2576 struct spi_replaced_transfers *srt;
2577 size_t offset;
2578 size_t count, i;
2579
2580 /* warn once about this fact that we are splitting a transfer */
2581 dev_warn_once(&msg->spi->dev,
2582 "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
2583 xfer->len, maxsize);
2584
2585 /* calculate how many we have to replace */
2586 count = DIV_ROUND_UP(xfer->len, maxsize);
2587
2588 /* create replacement */
2589 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2590 if (IS_ERR(srt))
2591 return PTR_ERR(srt);
2592 xfers = srt->inserted_transfers;
2593
        /* Now handle each of those newly inserted spi_transfers.
         * Note that the replacement spi_transfers are all preset
         * to the same values as *xferp, so tx_buf, rx_buf and len
         * are all identical (as well as most others), so we just
         * have to fix up len and the pointers.
         *
         * This also includes support for the deprecated
         * spi_message.is_dma_mapped interface.
         */
2603
2604 /* the first transfer just needs the length modified, so we
2605 * run it outside the loop
2606 */
2607 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2608
2609 /* all the others need rx_buf/tx_buf also set */
2610 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2611 /* update rx_buf, tx_buf and dma */
2612 if (xfers[i].rx_buf)
2613 xfers[i].rx_buf += offset;
2614 if (xfers[i].rx_dma)
2615 xfers[i].rx_dma += offset;
2616 if (xfers[i].tx_buf)
2617 xfers[i].tx_buf += offset;
2618 if (xfers[i].tx_dma)
2619 xfers[i].tx_dma += offset;
2620
2621 /* update length */
2622 xfers[i].len = min(maxsize, xfers[i].len - offset);
2623 }
2624
2625 /* we set up xferp to the last entry we have inserted,
2626 * so that we skip those already split transfers
2627 */
2628 *xferp = &xfers[count - 1];
2629
2630 /* increment statistics counters */
2631 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2632 transfers_split_maxsize);
2633 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2634 transfers_split_maxsize);
2635
2636 return 0;
2637}
2638
2639/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum size above which a transfer gets split
2646 * @gfp: GFP allocation flags
2647 *
2648 * Return: status of transformation
2649 */
2650int spi_split_transfers_maxsize(struct spi_controller *ctlr,
2651 struct spi_message *msg,
2652 size_t maxsize,
2653 gfp_t gfp)
2654{
2655 struct spi_transfer *xfer;
2656 int ret;
2657
        /* Iterate over the transfer_list,
         * but note that xfer is advanced to the last transfer inserted
         * to avoid checking sizes again unnecessarily (also, xfer may
         * potentially belong to a different list by the time the
         * replacement has happened).
         */
2664 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2665 if (xfer->len > maxsize) {
2666 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
2667 maxsize, gfp);
2668 if (ret)
2669 return ret;
2670 }
2671 }
2672
2673 return 0;
2674}
2675EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
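/*
 * Example (illustrative sketch only): a hypothetical controller driver
 * with a 256 byte transfer limit could split oversized transfers from
 * its prepare_message() callback:
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 256,
 *						   GFP_KERNEL);
 *	}
 */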
2676
2677/*-------------------------------------------------------------------------*/
2678
2679/* Core methods for SPI controller protocol drivers. Some of the
2680 * other core methods are currently defined as inline functions.
2681 */
2682
2683static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2684 u8 bits_per_word)
2685{
2686 if (ctlr->bits_per_word_mask) {
2687 /* Only 32 bits fit in the mask */
2688 if (bits_per_word > 32)
2689 return -EINVAL;
2690 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
2691 return -EINVAL;
2692 }
2693
2694 return 0;
2695}
2696
2697/**
2698 * spi_setup - setup SPI mode and clock rate
2699 * @spi: the device whose settings are being modified
2700 * Context: can sleep, and no requests are queued to the device
2701 *
2702 * SPI protocol drivers may need to update the transfer mode if the
2703 * device doesn't work with its default. They may likewise need
2704 * to update clock rates or word sizes from initial values. This function
2705 * changes those settings, and must be called from a context that can sleep.
2706 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2707 * effect the next time the device is selected and data is transferred to
2708 * or from it. When this function returns, the spi device is deselected.
2709 *
2710 * Note that this call will fail if the protocol driver specifies an option
2711 * that the underlying controller or its driver does not support. For
2712 * example, not all hardware supports wire transfers using nine bit words,
2713 * LSB-first wire encoding, or active-high chipselects.
2714 *
2715 * Return: zero on success, else a negative error code.
2716 */
2717int spi_setup(struct spi_device *spi)
2718{
2719 unsigned bad_bits, ugly_bits;
2720 int status;
2721
        /* Check mode to prevent DUAL and QUAD from being set at the same
         * time.
         */
        if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
            ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
                dev_err(&spi->dev,
                        "setup: cannot select dual and quad at the same time\n");
                return -EINVAL;
        }
        /* In SPI_3WIRE mode, DUAL and QUAD are forbidden.
         */
2732 if ((spi->mode & SPI_3WIRE) && (spi->mode &
2733 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2734 return -EINVAL;
2735 /* help drivers fail *cleanly* when they need options
2736 * that aren't supported with their current controller
2737 */
2738 bad_bits = spi->mode & ~spi->controller->mode_bits;
2739 ugly_bits = bad_bits &
2740 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2741 if (ugly_bits) {
2742 dev_warn(&spi->dev,
2743 "setup: ignoring unsupported mode bits %x\n",
2744 ugly_bits);
2745 spi->mode &= ~ugly_bits;
2746 bad_bits &= ~ugly_bits;
2747 }
2748 if (bad_bits) {
2749 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2750 bad_bits);
2751 return -EINVAL;
2752 }
2753
2754 if (!spi->bits_per_word)
2755 spi->bits_per_word = 8;
2756
2757 status = __spi_validate_bits_per_word(spi->controller,
2758 spi->bits_per_word);
2759 if (status)
2760 return status;
2761
2762 if (!spi->max_speed_hz)
2763 spi->max_speed_hz = spi->controller->max_speed_hz;
2764
2765 if (spi->controller->setup)
2766 status = spi->controller->setup(spi);
2767
2768 spi_set_cs(spi, false);
2769
2770 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2771 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2772 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2773 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2774 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2775 (spi->mode & SPI_LOOP) ? "loopback, " : "",
2776 spi->bits_per_word, spi->max_speed_hz,
2777 status);
2778
2779 return status;
2780}
2781EXPORT_SYMBOL_GPL(spi_setup);
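/*
 * Example (illustrative sketch only): a protocol driver adjusting the
 * device settings from its probe() before issuing transfers:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */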
2782
2783static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2784{
2785 struct spi_controller *ctlr = spi->controller;
2786 struct spi_transfer *xfer;
2787 int w_size;
2788
2789 if (list_empty(&message->transfers))
2790 return -EINVAL;
2791
2792 /* Half-duplex links include original MicroWire, and ones with
2793 * only one data pin like SPI_3WIRE (switches direction) or where
2794 * either MOSI or MISO is missing. They can also be caused by
2795 * software limitations.
2796 */
2797 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
2798 (spi->mode & SPI_3WIRE)) {
2799 unsigned flags = ctlr->flags;
2800
2801 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2802 if (xfer->rx_buf && xfer->tx_buf)
2803 return -EINVAL;
2804 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
2805 return -EINVAL;
2806 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
2807 return -EINVAL;
2808 }
2809 }
2810
        /*
         * Set transfer bits_per_word and max speed as spi device default if
         * it is not set for this transfer.
         * Set transfer tx_nbits and rx_nbits as single transfer default
         * (SPI_NBITS_SINGLE) if it is not set for this transfer.
         */
2817 message->frame_length = 0;
2818 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2819 message->frame_length += xfer->len;
2820 if (!xfer->bits_per_word)
2821 xfer->bits_per_word = spi->bits_per_word;
2822
2823 if (!xfer->speed_hz)
2824 xfer->speed_hz = spi->max_speed_hz;
2825 if (!xfer->speed_hz)
2826 xfer->speed_hz = ctlr->max_speed_hz;
2827
2828 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
2829 xfer->speed_hz = ctlr->max_speed_hz;
2830
2831 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
2832 return -EINVAL;
2833
2834 /*
2835 * SPI transfer length should be multiple of SPI word size
2836 * where SPI word size should be power-of-two multiple
2837 */
2838 if (xfer->bits_per_word <= 8)
2839 w_size = 1;
2840 else if (xfer->bits_per_word <= 16)
2841 w_size = 2;
2842 else
2843 w_size = 4;
2844
2845 /* No partial transfers accepted */
2846 if (xfer->len % w_size)
2847 return -EINVAL;
2848
2849 if (xfer->speed_hz && ctlr->min_speed_hz &&
2850 xfer->speed_hz < ctlr->min_speed_hz)
2851 return -EINVAL;
2852
2853 if (xfer->tx_buf && !xfer->tx_nbits)
2854 xfer->tx_nbits = SPI_NBITS_SINGLE;
2855 if (xfer->rx_buf && !xfer->rx_nbits)
2856 xfer->rx_nbits = SPI_NBITS_SINGLE;
2857 /* check transfer tx/rx_nbits:
2858 * 1. check the value matches one of single, dual and quad
2859 * 2. check tx/rx_nbits match the mode in spi_device
2860 */
2861 if (xfer->tx_buf) {
2862 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2863 xfer->tx_nbits != SPI_NBITS_DUAL &&
2864 xfer->tx_nbits != SPI_NBITS_QUAD)
2865 return -EINVAL;
2866 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2867 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2868 return -EINVAL;
2869 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2870 !(spi->mode & SPI_TX_QUAD))
2871 return -EINVAL;
2872 }
2873 /* check transfer rx_nbits */
2874 if (xfer->rx_buf) {
2875 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2876 xfer->rx_nbits != SPI_NBITS_DUAL &&
2877 xfer->rx_nbits != SPI_NBITS_QUAD)
2878 return -EINVAL;
2879 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2880 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2881 return -EINVAL;
2882 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2883 !(spi->mode & SPI_RX_QUAD))
2884 return -EINVAL;
2885 }
2886 }
2887
2888 message->status = -EINPROGRESS;
2889
2890 return 0;
2891}
2892
2893static int __spi_async(struct spi_device *spi, struct spi_message *message)
2894{
2895 struct spi_controller *ctlr = spi->controller;
2896
2897 message->spi = spi;
2898
2899 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
2900 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2901
2902 trace_spi_message_submit(message);
2903
2904 return ctlr->transfer(spi, message);
2905}
2906
2907/**
2908 * spi_async - asynchronous SPI transfer
2909 * @spi: device with which data will be exchanged
2910 * @message: describes the data transfers, including completion callback
2911 * Context: any (irqs may be blocked, etc)
2912 *
2913 * This call may be used in_irq and other contexts which can't sleep,
2914 * as well as from task contexts which can sleep.
2915 *
2916 * The completion callback is invoked in a context which can't sleep.
2917 * Before that invocation, the value of message->status is undefined.
2918 * When the callback is issued, message->status holds either zero (to
2919 * indicate complete success) or a negative error code. After that
2920 * callback returns, the driver which issued the transfer request may
2921 * deallocate the associated memory; it's no longer in use by any SPI
2922 * core or controller driver code.
2923 *
2924 * Note that although all messages to a spi_device are handled in
2925 * FIFO order, messages may go to different devices in other orders.
2926 * Some device might be higher priority, or have various "hard" access
2927 * time requirements, for example.
2928 *
2929 * On detection of any fault during the transfer, processing of
2930 * the entire message is aborted, and the device is deselected.
2931 * Until returning from the associated message completion callback,
2932 * no other spi_message queued to that device will be processed.
2933 * (This rule applies equally to all the synchronous transfer calls,
2934 * which are wrappers around this core asynchronous primitive.)
2935 *
2936 * Return: zero on success, else a negative error code.
2937 */
2938int spi_async(struct spi_device *spi, struct spi_message *message)
2939{
2940 struct spi_controller *ctlr = spi->controller;
2941 int ret;
2942 unsigned long flags;
2943
2944 ret = __spi_validate(spi, message);
2945 if (ret != 0)
2946 return ret;
2947
2948 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
2949
2950 if (ctlr->bus_lock_flag)
2951 ret = -EBUSY;
2952 else
2953 ret = __spi_async(spi, message);
2954
2955 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
2956
2957 return ret;
2958}
2959EXPORT_SYMBOL_GPL(spi_async);
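/*
 * Example (illustrative sketch only): submitting a message from task
 * context and waiting on a completion; "foo_complete" is a hypothetical
 * callback.
 *
 *	static void foo_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	msg->complete = foo_complete;
 *	msg->context = &done;
 *	status = spi_async(spi, msg);
 *	if (!status)
 *		wait_for_completion(&done);
 */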
2960
2961/**
2962 * spi_async_locked - version of spi_async with exclusive bus usage
2963 * @spi: device with which data will be exchanged
2964 * @message: describes the data transfers, including completion callback
2965 * Context: any (irqs may be blocked, etc)
2966 *
2967 * This call may be used in_irq and other contexts which can't sleep,
2968 * as well as from task contexts which can sleep.
2969 *
2970 * The completion callback is invoked in a context which can't sleep.
2971 * Before that invocation, the value of message->status is undefined.
2972 * When the callback is issued, message->status holds either zero (to
2973 * indicate complete success) or a negative error code. After that
2974 * callback returns, the driver which issued the transfer request may
2975 * deallocate the associated memory; it's no longer in use by any SPI
2976 * core or controller driver code.
2977 *
2978 * Note that although all messages to a spi_device are handled in
2979 * FIFO order, messages may go to different devices in other orders.
2980 * Some device might be higher priority, or have various "hard" access
2981 * time requirements, for example.
2982 *
2983 * On detection of any fault during the transfer, processing of
2984 * the entire message is aborted, and the device is deselected.
2985 * Until returning from the associated message completion callback,
2986 * no other spi_message queued to that device will be processed.
2987 * (This rule applies equally to all the synchronous transfer calls,
2988 * which are wrappers around this core asynchronous primitive.)
2989 *
2990 * Return: zero on success, else a negative error code.
2991 */
2992int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2993{
2994 struct spi_controller *ctlr = spi->controller;
2995 int ret;
2996 unsigned long flags;
2997
2998 ret = __spi_validate(spi, message);
2999 if (ret != 0)
3000 return ret;
3001
3002 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3003
3004 ret = __spi_async(spi, message);
3005
3006 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3007
3008 return ret;
3009
3010}
3011EXPORT_SYMBOL_GPL(spi_async_locked);
3012
3013
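/**
 * spi_flash_read - read flash data using the controller's accelerated path
 * @spi: device from which data will be read
 * @msg: flash read message to execute
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep, and only
 * with controllers that implement the spi_flash_read() method.
 *
 * Return: zero on success, else a negative error code.
 */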
int spi_flash_read(struct spi_device *spi,
                   struct spi_flash_read_message *msg)
{
3018 struct spi_controller *master = spi->controller;
3019 struct device *rx_dev = NULL;
3020 int ret;
3021
3022 if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
3023 msg->addr_nbits == SPI_NBITS_DUAL) &&
3024 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3025 return -EINVAL;
3026 if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
3027 msg->addr_nbits == SPI_NBITS_QUAD) &&
3028 !(spi->mode & SPI_TX_QUAD))
3029 return -EINVAL;
3030 if (msg->data_nbits == SPI_NBITS_DUAL &&
3031 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3032 return -EINVAL;
3033 if (msg->data_nbits == SPI_NBITS_QUAD &&
3034 !(spi->mode & SPI_RX_QUAD))
3035 return -EINVAL;
3036
        if (master->auto_runtime_pm) {
                ret = pm_runtime_get_sync(master->dev.parent);
                if (ret < 0) {
                        /* get_sync() leaves the usage count raised on error */
                        pm_runtime_put_noidle(master->dev.parent);
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
                        return ret;
                }
        }
3045
3046 mutex_lock(&master->bus_lock_mutex);
3047 mutex_lock(&master->io_mutex);
        if (master->dma_rx && master->spi_flash_can_dma &&
            master->spi_flash_can_dma(spi, msg)) {
3049 rx_dev = master->dma_rx->device->dev;
3050 ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
3051 msg->buf, msg->len,
3052 DMA_FROM_DEVICE);
3053 if (!ret)
3054 msg->cur_msg_mapped = true;
3055 }
3056 ret = master->spi_flash_read(spi, msg);
3057 if (msg->cur_msg_mapped)
3058 spi_unmap_buf(master, rx_dev, &msg->rx_sg,
3059 DMA_FROM_DEVICE);
3060 mutex_unlock(&master->io_mutex);
3061 mutex_unlock(&master->bus_lock_mutex);
3062
3063 if (master->auto_runtime_pm)
3064 pm_runtime_put(master->dev.parent);
3065
3066 return ret;
3067}
3068EXPORT_SYMBOL_GPL(spi_flash_read);
3069
3070/*-------------------------------------------------------------------------*/
3071
3072/* Utility methods for SPI protocol drivers, layered on
3073 * top of the core. Some other utility methods are defined as
3074 * inline functions.
3075 */
3076
3077static void spi_complete(void *arg)
3078{
3079 complete(arg);
3080}
3081
3082static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3083{
3084 DECLARE_COMPLETION_ONSTACK(done);
3085 int status;
3086 struct spi_controller *ctlr = spi->controller;
3087 unsigned long flags;
3088
3089 status = __spi_validate(spi, message);
3090 if (status != 0)
3091 return status;
3092
3093 message->complete = spi_complete;
3094 message->context = &done;
3095 message->spi = spi;
3096
3097 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3098 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3099
        /* If we're not using the legacy transfer method, we will
         * try to transfer in the calling context, so special-case it.
         * This code would be less tricky if we could remove the
         * support for driver-implemented message queues.
         */
3105 if (ctlr->transfer == spi_queued_transfer) {
3106 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3107
3108 trace_spi_message_submit(message);
3109
3110 status = __spi_queued_transfer(spi, message, false);
3111
3112 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3113 } else {
3114 status = spi_async_locked(spi, message);
3115 }
3116
3117 if (status == 0) {
3118 /* Push out the messages in the calling context if we
3119 * can.
3120 */
3121 if (ctlr->transfer == spi_queued_transfer) {
3122 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3123 spi_sync_immediate);
3124 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3125 spi_sync_immediate);
3126 __spi_pump_messages(ctlr, false);
3127 }
3128
3129 wait_for_completion(&done);
3130 status = message->status;
3131 }
3132 message->context = NULL;
3133 return status;
3134}
3135
3136/**
3137 * spi_sync - blocking/synchronous SPI data transfers
3138 * @spi: device with which data will be exchanged
3139 * @message: describes the data transfers
3140 * Context: can sleep
3141 *
3142 * This call may only be used from a context that may sleep. The sleep
3143 * is non-interruptible, and has no timeout. Low-overhead controller
3144 * drivers may DMA directly into and out of the message buffers.
3145 *
3146 * Note that the SPI device's chip select is active during the message,
3147 * and then is normally disabled between messages. Drivers for some
3148 * frequently-used devices may want to minimize costs of selecting a chip,
3149 * by leaving it selected in anticipation that the next message will go
3150 * to the same chip. (That may increase power usage.)
3151 *
3152 * Also, the caller is guaranteeing that the memory associated with the
3153 * message will not be freed before this call returns.
3154 *
3155 * Return: zero on success, else a negative error code.
3156 */
3157int spi_sync(struct spi_device *spi, struct spi_message *message)
3158{
3159 int ret;
3160
3161 mutex_lock(&spi->controller->bus_lock_mutex);
3162 ret = __spi_sync(spi, message);
3163 mutex_unlock(&spi->controller->bus_lock_mutex);
3164
3165 return ret;
3166}
3167EXPORT_SYMBOL_GPL(spi_sync);
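/*
 * Example (illustrative sketch only): a synchronous message made of a
 * write transfer followed by a read transfer; "cmd" and "resp" are
 * hypothetical DMA-safe buffers.
 *
 *	struct spi_transfer t[2] = {
 *		{ .tx_buf = cmd,  .len = sizeof(cmd),  },
 *		{ .rx_buf = resp, .len = sizeof(resp), },
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
 *	status = spi_sync(spi, &m);
 */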
3168
3169/**
3170 * spi_sync_locked - version of spi_sync with exclusive bus usage
3171 * @spi: device with which data will be exchanged
3172 * @message: describes the data transfers
3173 * Context: can sleep
3174 *
3175 * This call may only be used from a context that may sleep. The sleep
3176 * is non-interruptible, and has no timeout. Low-overhead controller
3177 * drivers may DMA directly into and out of the message buffers.
3178 *
3179 * This call should be used by drivers that require exclusive access to the
3180 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3181 * be released by a spi_bus_unlock call when the exclusive access is over.
3182 *
3183 * Return: zero on success, else a negative error code.
3184 */
3185int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3186{
3187 return __spi_sync(spi, message);
3188}
3189EXPORT_SYMBOL_GPL(spi_sync_locked);
3190
3191/**
3192 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3193 * @ctlr: SPI bus master that should be locked for exclusive bus access
3194 * Context: can sleep
3195 *
3196 * This call may only be used from a context that may sleep. The sleep
3197 * is non-interruptible, and has no timeout.
3198 *
3199 * This call should be used by drivers that require exclusive access to the
3200 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3201 * exclusive access is over. Data transfer must be done by spi_sync_locked
3202 * and spi_async_locked calls when the SPI bus lock is held.
3203 *
3204 * Return: always zero.
3205 */
3206int spi_bus_lock(struct spi_controller *ctlr)
3207{
3208 unsigned long flags;
3209
3210 mutex_lock(&ctlr->bus_lock_mutex);
3211
3212 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3213 ctlr->bus_lock_flag = 1;
3214 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3215
3216 /* mutex remains locked until spi_bus_unlock is called */
3217
3218 return 0;
3219}
3220EXPORT_SYMBOL_GPL(spi_bus_lock);
3221
3222/**
3223 * spi_bus_unlock - release the lock for exclusive SPI bus usage
3224 * @ctlr: SPI bus master that was locked for exclusive bus access
3225 * Context: can sleep
3226 *
3227 * This call may only be used from a context that may sleep. The sleep
3228 * is non-interruptible, and has no timeout.
3229 *
3230 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
3231 * call.
3232 *
3233 * Return: always zero.
3234 */
3235int spi_bus_unlock(struct spi_controller *ctlr)
3236{
3237 ctlr->bus_lock_flag = 0;
3238
3239 mutex_unlock(&ctlr->bus_lock_mutex);
3240
3241 return 0;
3242}
3243EXPORT_SYMBOL_GPL(spi_bus_unlock);
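/*
 * Example (illustrative sketch only): performing two messages back to
 * back with no other traffic on the bus in between.
 *
 *	spi_bus_lock(spi->controller);
 *	status = spi_sync_locked(spi, &first_msg);
 *	if (!status)
 *		status = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->controller);
 */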
3244
3245/* portable code must never pass more than 32 bytes */
3246#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
3247
3248static u8 *buf;
3249
3250/**
3251 * spi_write_then_read - SPI synchronous write followed by read
3252 * @spi: device with which data will be exchanged
3253 * @txbuf: data to be written (need not be dma-safe)
3254 * @n_tx: size of txbuf, in bytes
3255 * @rxbuf: buffer into which data will be read (need not be dma-safe)
3256 * @n_rx: size of rxbuf, in bytes
3257 * Context: can sleep
3258 *
3259 * This performs a half duplex MicroWire style transaction with the
3260 * device, sending txbuf and then reading rxbuf. The return value
3261 * is zero for success, else a negative errno status code.
3262 * This call may only be used from a context that may sleep.
3263 *
3264 * Parameters to this routine are always copied using a small buffer;
3265 * portable code should never use this for more than 32 bytes.
3266 * Performance-sensitive or bulk transfer code should instead use
3267 * spi_{async,sync}() calls with dma-safe buffers.
3268 *
3269 * Return: zero on success, else a negative error code.
3270 */
3271int spi_write_then_read(struct spi_device *spi,
3272 const void *txbuf, unsigned n_tx,
3273 void *rxbuf, unsigned n_rx)
3274{
3275 static DEFINE_MUTEX(lock);
3276
3277 int status;
3278 struct spi_message message;
3279 struct spi_transfer x[2];
3280 u8 *local_buf;
3281
        /* Use the preallocated DMA-safe buffer if we can.  We can't avoid
         * copying here (this is purely a convenience thing), but we can
         * keep heap costs out of the hot path unless someone else is
         * using the pre-allocated buffer or the transfer is too large.
         */
3287 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3288 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3289 GFP_KERNEL | GFP_DMA);
3290 if (!local_buf)
3291 return -ENOMEM;
3292 } else {
3293 local_buf = buf;
3294 }
3295
3296 spi_message_init(&message);
3297 memset(x, 0, sizeof(x));
3298 if (n_tx) {
3299 x[0].len = n_tx;
3300 spi_message_add_tail(&x[0], &message);
3301 }
3302 if (n_rx) {
3303 x[1].len = n_rx;
3304 spi_message_add_tail(&x[1], &message);
3305 }
3306
3307 memcpy(local_buf, txbuf, n_tx);
3308 x[0].tx_buf = local_buf;
3309 x[1].rx_buf = local_buf + n_tx;
3310
3311 /* do the i/o */
3312 status = spi_sync(spi, &message);
3313 if (status == 0)
3314 memcpy(rxbuf, x[1].rx_buf, n_rx);
3315
3316 if (x[0].tx_buf == buf)
3317 mutex_unlock(&lock);
3318 else
3319 kfree(local_buf);
3320
3321 return status;
3322}
3323EXPORT_SYMBOL_GPL(spi_write_then_read);
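/*
 * Example (illustrative sketch only): reading a hypothetical one-byte
 * register at address "reg"; the buffers need not be DMA-safe.
 *
 *	u8 cmd = 0x80 | reg;	// hypothetical "read register" opcode
 *	u8 val;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */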
3324
3325/*-------------------------------------------------------------------------*/
3326
3327#if IS_ENABLED(CONFIG_OF_DYNAMIC)
3328static int __spi_of_device_match(struct device *dev, void *data)
3329{
3330 return dev->of_node == data;
3331}
3332
/* must call put_device() when done with the returned spi_device */
3334static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3335{
3336 struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3337 __spi_of_device_match);
3338 return dev ? to_spi_device(dev) : NULL;
3339}
3340
3341static int __spi_of_controller_match(struct device *dev, const void *data)
3342{
3343 return dev->of_node == data;
3344}
3345
/* SPI controllers are not on the spi_bus, so we must find them another way */
3347static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3348{
3349 struct device *dev;
3350
3351 dev = class_find_device(&spi_master_class, NULL, node,
3352 __spi_of_controller_match);
3353 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3354 dev = class_find_device(&spi_slave_class, NULL, node,
3355 __spi_of_controller_match);
3356 if (!dev)
3357 return NULL;
3358
3359 /* reference got in class_find_device */
3360 return container_of(dev, struct spi_controller, dev);
3361}
3362
3363static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3364 void *arg)
3365{
3366 struct of_reconfig_data *rd = arg;
3367 struct spi_controller *ctlr;
3368 struct spi_device *spi;
3369
3370 switch (of_reconfig_get_state_change(action, arg)) {
3371 case OF_RECONFIG_CHANGE_ADD:
3372 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
3373 if (ctlr == NULL)
3374 return NOTIFY_OK; /* not for us */
3375
3376 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3377 put_device(&ctlr->dev);
3378 return NOTIFY_OK;
3379 }
3380
3381 spi = of_register_spi_device(ctlr, rd->dn);
3382 put_device(&ctlr->dev);
3383
3384 if (IS_ERR(spi)) {
3385 pr_err("%s: failed to create for '%pOF'\n",
3386 __func__, rd->dn);
3387 of_node_clear_flag(rd->dn, OF_POPULATED);
3388 return notifier_from_errno(PTR_ERR(spi));
3389 }
3390 break;
3391
3392 case OF_RECONFIG_CHANGE_REMOVE:
3393 /* already depopulated? */
3394 if (!of_node_check_flag(rd->dn, OF_POPULATED))
3395 return NOTIFY_OK;
3396
3397 /* find our device by node */
3398 spi = of_find_spi_device_by_node(rd->dn);
3399 if (spi == NULL)
3400 return NOTIFY_OK; /* no? not meant for us */
3401
3402 /* unregister takes one ref away */
3403 spi_unregister_device(spi);
3404
3405 /* and put the reference of the find */
3406 put_device(&spi->dev);
3407 break;
3408 }
3409
3410 return NOTIFY_OK;
3411}
3412
3413static struct notifier_block spi_of_notifier = {
3414 .notifier_call = of_spi_notify,
3415};
3416#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3417extern struct notifier_block spi_of_notifier;
3418#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3419
3420#if IS_ENABLED(CONFIG_ACPI)
3421static int spi_acpi_controller_match(struct device *dev, const void *data)
3422{
3423 return ACPI_COMPANION(dev->parent) == data;
3424}
3425
3426static int spi_acpi_device_match(struct device *dev, void *data)
3427{
3428 return ACPI_COMPANION(dev) == data;
3429}
3430
3431static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3432{
3433 struct device *dev;
3434
3435 dev = class_find_device(&spi_master_class, NULL, adev,
3436 spi_acpi_controller_match);
3437 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3438 dev = class_find_device(&spi_slave_class, NULL, adev,
3439 spi_acpi_controller_match);
3440 if (!dev)
3441 return NULL;
3442
3443 return container_of(dev, struct spi_controller, dev);
3444}
3445
3446static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3447{
3448 struct device *dev;
3449
3450 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3451
3452 return dev ? to_spi_device(dev) : NULL;
3453}
3454
3455static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3456 void *arg)
3457{
3458 struct acpi_device *adev = arg;
3459 struct spi_controller *ctlr;
3460 struct spi_device *spi;
3461
3462 switch (value) {
3463 case ACPI_RECONFIG_DEVICE_ADD:
3464 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
3465 if (!ctlr)
3466 break;
3467
3468 acpi_register_spi_device(ctlr, adev);
3469 put_device(&ctlr->dev);
3470 break;
3471 case ACPI_RECONFIG_DEVICE_REMOVE:
3472 if (!acpi_device_enumerated(adev))
3473 break;
3474
3475 spi = acpi_spi_find_device_by_adev(adev);
3476 if (!spi)
3477 break;
3478
3479 spi_unregister_device(spi);
3480 put_device(&spi->dev);
3481 break;
3482 }
3483
3484 return NOTIFY_OK;
3485}
3486
3487static struct notifier_block spi_acpi_notifier = {
3488 .notifier_call = acpi_spi_notify,
3489};
3490#else
3491extern struct notifier_block spi_acpi_notifier;
3492#endif
3493
3494static int __init spi_init(void)
3495{
3496 int status;
3497
3498 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3499 if (!buf) {
3500 status = -ENOMEM;
3501 goto err0;
3502 }
3503
3504 status = bus_register(&spi_bus_type);
3505 if (status < 0)
3506 goto err1;
3507
3508 status = class_register(&spi_master_class);
3509 if (status < 0)
3510 goto err2;
3511
3512 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3513 status = class_register(&spi_slave_class);
3514 if (status < 0)
3515 goto err3;
3516 }
3517
3518 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3519 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3520 if (IS_ENABLED(CONFIG_ACPI))
3521 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3522
3523 return 0;
3524
3525err3:
3526 class_unregister(&spi_master_class);
3527err2:
3528 bus_unregister(&spi_bus_type);
3529err1:
3530 kfree(buf);
3531 buf = NULL;
3532err0:
3533 return status;
3534}
3535
/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
3543postcore_initcall(spi_init);
3544
299 &dev_attr_spi_controller_transfers_split_maxsize.attr,
300 NULL,
301};
302
303static const struct attribute_group spi_controller_statistics_group = {
304 .name = "statistics",
305 .attrs = spi_controller_statistics_attrs,
306};
307
308static const struct attribute_group *spi_master_groups[] = {
309 &spi_controller_statistics_group,
310 NULL,
311};
312
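/*
 * Bucketing sketch: l2len = fls(len) - 1 picks the power-of-two histogram
 * bucket. For example, len = 10 has fls(10) = 4, so l2len = 3 and the
 * transfer is counted in transfer_bytes_histo[3] ("8-15"); len = 0 is
 * clamped into bucket 0 ("0-1"), and any len with fls(len) >=
 * SPI_STATISTICS_HISTO_SIZE lands in the final "65536+" bucket.
 */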
313static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
314 struct spi_transfer *xfer,
315 struct spi_controller *ctlr)
316{
317 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
318 struct spi_statistics *stats;
319
320 if (l2len < 0)
321 l2len = 0;
322
323 get_cpu();
324 stats = this_cpu_ptr(pcpu_stats);
325 u64_stats_update_begin(&stats->syncp);
326
327 u64_stats_inc(&stats->transfers);
328 u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
329
330 u64_stats_add(&stats->bytes, xfer->len);
331 if ((xfer->tx_buf) &&
332 (xfer->tx_buf != ctlr->dummy_tx))
333 u64_stats_add(&stats->bytes_tx, xfer->len);
334 if ((xfer->rx_buf) &&
335 (xfer->rx_buf != ctlr->dummy_rx))
336 u64_stats_add(&stats->bytes_rx, xfer->len);
337
338 u64_stats_update_end(&stats->syncp);
339 put_cpu();
340}
341
342/*
343 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
344 * and the sysfs version makes coldplug work too.
345 */
346static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
347{
348 while (id->name[0]) {
349 if (!strcmp(name, id->name))
350 return id;
351 id++;
352 }
353 return NULL;
354}
355
356const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
357{
358 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
359
360 return spi_match_id(sdrv->id_table, sdev->modalias);
361}
362EXPORT_SYMBOL_GPL(spi_get_device_id);
363
364const void *spi_get_device_match_data(const struct spi_device *sdev)
365{
366 const void *match;
367
368 match = device_get_match_data(&sdev->dev);
369 if (match)
370 return match;
371
372 return (const void *)spi_get_device_id(sdev)->driver_data;
373}
374EXPORT_SYMBOL_GPL(spi_get_device_match_data);
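
/*
 * Usage sketch (hypothetical "foo" driver, not part of the core): the same
 * call works whether the device was enumerated via DT, ACPI or a legacy
 * spi_device_id table.
 *
 *	struct foo_info {
 *		u32 max_hz;
 *	};
 *
 *	static const struct foo_info foo_chip_info = { .max_hz = 10000000 };
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo", .data = &foo_chip_info },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct foo_info *info = spi_get_device_match_data(spi);
 *
 *		if (!info)
 *			return -ENODEV;
 *		return 0;
 *	}
 */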
375
376static int spi_match_device(struct device *dev, struct device_driver *drv)
377{
378 const struct spi_device *spi = to_spi_device(dev);
379 const struct spi_driver *sdrv = to_spi_driver(drv);
380
381 /* Check override first, and if set, only use the named driver */
382 if (spi->driver_override)
383 return strcmp(spi->driver_override, drv->name) == 0;
384
385 /* Attempt an OF style match */
386 if (of_driver_match_device(dev, drv))
387 return 1;
388
389 /* Then try ACPI */
390 if (acpi_driver_match_device(dev, drv))
391 return 1;
392
393 if (sdrv->id_table)
394 return !!spi_match_id(sdrv->id_table, spi->modalias);
395
396 return strcmp(spi->modalias, drv->name) == 0;
397}
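
/*
 * The resulting match precedence is: driver_override, OF, ACPI, id_table,
 * then a plain modalias/name compare. For example, a bind can be forced
 * from userspace through the driver_override attribute exposed above,
 * roughly (hypothetical driver and device names):
 *
 *	echo foo-driver > /sys/bus/spi/devices/spi0.0/driver_override
 *	echo spi0.0 > /sys/bus/spi/drivers/foo-driver/bind
 */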
398
399static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
400{
401 const struct spi_device *spi = to_spi_device(dev);
402 int rc;
403
404 rc = acpi_device_uevent_modalias(dev, env);
405 if (rc != -ENODEV)
406 return rc;
407
408 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
409}
410
411static int spi_probe(struct device *dev)
412{
413 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
414 struct spi_device *spi = to_spi_device(dev);
415 int ret;
416
417 ret = of_clk_set_defaults(dev->of_node, false);
418 if (ret)
419 return ret;
420
421 if (dev->of_node) {
422 spi->irq = of_irq_get(dev->of_node, 0);
423 if (spi->irq == -EPROBE_DEFER)
424 return -EPROBE_DEFER;
425 if (spi->irq < 0)
426 spi->irq = 0;
427 }
428
429 ret = dev_pm_domain_attach(dev, true);
430 if (ret)
431 return ret;
432
433 if (sdrv->probe) {
434 ret = sdrv->probe(spi);
435 if (ret)
436 dev_pm_domain_detach(dev, true);
437 }
438
439 return ret;
440}
441
442static void spi_remove(struct device *dev)
443{
444 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
445
446 if (sdrv->remove)
447 sdrv->remove(to_spi_device(dev));
448
449 dev_pm_domain_detach(dev, true);
450}
451
452static void spi_shutdown(struct device *dev)
453{
454 if (dev->driver) {
455 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
456
457 if (sdrv->shutdown)
458 sdrv->shutdown(to_spi_device(dev));
459 }
460}
461
462const struct bus_type spi_bus_type = {
463 .name = "spi",
464 .dev_groups = spi_dev_groups,
465 .match = spi_match_device,
466 .uevent = spi_uevent,
467 .probe = spi_probe,
468 .remove = spi_remove,
469 .shutdown = spi_shutdown,
470};
471EXPORT_SYMBOL_GPL(spi_bus_type);
472
473/**
474 * __spi_register_driver - register a SPI driver
475 * @owner: owner module of the driver to register
476 * @sdrv: the driver to register
477 * Context: can sleep
478 *
479 * Return: zero on success, else a negative error code.
480 */
481int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
482{
483 sdrv->driver.owner = owner;
484 sdrv->driver.bus = &spi_bus_type;
485
486 /*
487 * For Really Good Reasons we use spi: modaliases not of:
488 * modaliases for DT so module autoloading won't work if we
489 * don't have a spi_device_id as well as a compatible string.
490 */
491 if (sdrv->driver.of_match_table) {
492 const struct of_device_id *of_id;
493
494 for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
495 of_id++) {
496 const char *of_name;
497
498 /* Strip off any vendor prefix */
499 of_name = strnchr(of_id->compatible,
500 sizeof(of_id->compatible), ',');
501 if (of_name)
502 of_name++;
503 else
504 of_name = of_id->compatible;
505
506 if (sdrv->id_table) {
507 const struct spi_device_id *spi_id;
508
509 spi_id = spi_match_id(sdrv->id_table, of_name);
510 if (spi_id)
511 continue;
512 } else {
513 if (strcmp(sdrv->driver.name, of_name) == 0)
514 continue;
515 }
516
517 pr_warn("SPI driver %s has no spi_device_id for %s\n",
518 sdrv->driver.name, of_id->compatible);
519 }
520 }
521
522 return driver_register(&sdrv->driver);
523}
524EXPORT_SYMBOL_GPL(__spi_register_driver);
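
/*
 * A registration sketch (hypothetical "bar" driver, probe handler elided)
 * of the pairing the check above expects: every "vendor,chip" compatible
 * has a matching spi_device_id, so "spi:bar" uevents keep module
 * autoloading working.
 *
 *	static const struct spi_device_id bar_spi_ids[] = {
 *		{ "bar" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, bar_spi_ids);
 *
 *	static const struct of_device_id bar_of_match[] = {
 *		{ .compatible = "vendor,bar" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, bar_of_match);
 *
 *	static struct spi_driver bar_driver = {
 *		.driver = {
 *			.name = "bar",
 *			.of_match_table = bar_of_match,
 *		},
 *		.id_table = bar_spi_ids,
 *		.probe = bar_probe,
 *	};
 *	module_spi_driver(bar_driver);
 */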
525
526/*-------------------------------------------------------------------------*/
527
528/*
529 * SPI devices should normally not be created by SPI device drivers; that
530 * would make them board-specific. Similarly with SPI controller drivers.
531 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
532 * with other readonly (flashable) information about mainboard devices.
533 */
534
535struct boardinfo {
536 struct list_head list;
537 struct spi_board_info board_info;
538};
539
540static LIST_HEAD(board_list);
541static LIST_HEAD(spi_controller_list);
542
543/*
544 * Used to protect add/del operations on the board_info list and the
545 * spi_controller list, and their matching process. Also used to
546 * protect the bus-number idr (spi_master_idr).
547 */
548static DEFINE_MUTEX(board_lock);
549
550/**
551 * spi_alloc_device - Allocate a new SPI device
552 * @ctlr: Controller to which device is connected
553 * Context: can sleep
554 *
555 * Allows a driver to allocate and initialize a spi_device without
556 * registering it immediately. This allows a driver to directly
557 * fill the spi_device with device parameters before calling
558 * spi_add_device() on it.
559 *
560 * The caller is responsible for calling spi_add_device() on the returned
561 * spi_device structure to add it to the SPI controller. If the caller
562 * needs to discard the spi_device without adding it, then it should
563 * call spi_dev_put() on it.
564 *
565 * Return: a pointer to the new device, or NULL.
566 */
567struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
568{
569 struct spi_device *spi;
570
571 if (!spi_controller_get(ctlr))
572 return NULL;
573
574 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
575 if (!spi) {
576 spi_controller_put(ctlr);
577 return NULL;
578 }
579
580 spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
581 if (!spi->pcpu_statistics) {
582 kfree(spi);
583 spi_controller_put(ctlr);
584 return NULL;
585 }
586
587 spi->controller = ctlr;
588 spi->dev.parent = &ctlr->dev;
589 spi->dev.bus = &spi_bus_type;
590 spi->dev.release = spidev_release;
591 spi->mode = ctlr->buswidth_override_bits;
592
593 device_initialize(&spi->dev);
594 return spi;
595}
596EXPORT_SYMBOL_GPL(spi_alloc_device);
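
/*
 * Note for direct callers: kzalloc() leaves every chip_select[] slot at 0,
 * and 0 is a valid physical CS. Configure the chipselects (as
 * spi_new_device() below does via spi_set_all_cs_unused()) before calling
 * spi_add_device(), or the duplicate-CS checks in __spi_add_device() can
 * report spurious -EBUSY matches.
 */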
597
598static void spi_dev_set_name(struct spi_device *spi)
599{
600 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
601
602 if (adev) {
603 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
604 return;
605 }
606
607 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
608 spi_get_chipselect(spi, 0));
609}
610
611/*
612 * Zero(0) is a valid physical CS value and can be located at any
613 * logical CS in spi->chip_select[]. If all physical chipselects were
614 * initialized to 0, it would be impossible to differentiate between a
615 * valid physical CS 0 and an unused logical CS whose physical CS
616 * happens to be 0. To solve this, initialize all chipselects to -1;
617 * unused logical CS entries then hold -1 as their physical CS value
618 * and can be ignored when performing physical CS validity checks.
619 */
620#define SPI_INVALID_CS ((s8)-1)
621
622static inline bool is_valid_cs(s8 chip_select)
623{
624 return chip_select != SPI_INVALID_CS;
625}
626
627static inline int spi_dev_check_cs(struct device *dev,
628 struct spi_device *spi, u8 idx,
629 struct spi_device *new_spi, u8 new_idx)
630{
631 u8 cs, cs_new;
632 u8 idx_new;
633
634 cs = spi_get_chipselect(spi, idx);
635 for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
636 cs_new = spi_get_chipselect(new_spi, idx_new);
637 if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
638 dev_err(dev, "chipselect %u already in use\n", cs_new);
639 return -EBUSY;
640 }
641 }
642 return 0;
643}
644
645static int spi_dev_check(struct device *dev, void *data)
646{
647 struct spi_device *spi = to_spi_device(dev);
648 struct spi_device *new_spi = data;
649 int status, idx;
650
651 if (spi->controller == new_spi->controller) {
652 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
653 status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
654 if (status)
655 return status;
656 }
657 }
658 return 0;
659}
660
661static void spi_cleanup(struct spi_device *spi)
662{
663 if (spi->controller->cleanup)
664 spi->controller->cleanup(spi);
665}
666
667static int __spi_add_device(struct spi_device *spi)
668{
669 struct spi_controller *ctlr = spi->controller;
670 struct device *dev = ctlr->dev.parent;
671 int status, idx;
672 u8 cs;
673
674 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
675 /* Chipselects are numbered 0..max; validate. */
676 cs = spi_get_chipselect(spi, idx);
677 if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
678 dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
679 ctlr->num_chipselect);
680 return -EINVAL;
681 }
682 }
683
684 /*
685 * Make sure that no two logical CS map to the same physical CS.
686 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
687 */
688 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
689 status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
690 if (status)
691 return status;
692 }
693
694 /* Set the bus ID string */
695 spi_dev_set_name(spi);
696
697 /*
698 * We need to make sure there's no other device with this
699 * chipselect **BEFORE** we call setup(), else we'll trash
700 * its configuration.
701 */
702 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
703 if (status)
704 return status;
705
706 /* Controller may unregister concurrently */
707 if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
708 !device_is_registered(&ctlr->dev)) {
709 return -ENODEV;
710 }
711
712 if (ctlr->cs_gpiods) {
713 u8 cs;
714
715 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
716 cs = spi_get_chipselect(spi, idx);
717 if (is_valid_cs(cs))
718 spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
719 }
720 }
721
722 /*
723 * Drivers may modify this initial i/o setup, but will
724 * normally rely on the device being setup. Devices
725 * using SPI_CS_HIGH can't coexist well otherwise...
726 */
727 status = spi_setup(spi);
728 if (status < 0) {
729 dev_err(dev, "can't setup %s, status %d\n",
730 dev_name(&spi->dev), status);
731 return status;
732 }
733
734 /* Device may be bound to an active driver when this returns */
735 status = device_add(&spi->dev);
736 if (status < 0) {
737 dev_err(dev, "can't add %s, status %d\n",
738 dev_name(&spi->dev), status);
739 spi_cleanup(spi);
740 } else {
741 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
742 }
743
744 return status;
745}
746
747/**
748 * spi_add_device - Add spi_device allocated with spi_alloc_device
749 * @spi: spi_device to register
750 *
751 * Companion function to spi_alloc_device. Devices allocated with
752 * spi_alloc_device can be added onto the SPI bus with this function.
753 *
754 * Return: 0 on success; negative errno on failure
755 */
756int spi_add_device(struct spi_device *spi)
757{
758 struct spi_controller *ctlr = spi->controller;
759 int status;
760
761 /* Set the bus ID string */
762 spi_dev_set_name(spi);
763
764 mutex_lock(&ctlr->add_lock);
765 status = __spi_add_device(spi);
766 mutex_unlock(&ctlr->add_lock);
767 return status;
768}
769EXPORT_SYMBOL_GPL(spi_add_device);
770
771static void spi_set_all_cs_unused(struct spi_device *spi)
772{
773 u8 idx;
774
775 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
776 spi_set_chipselect(spi, idx, SPI_INVALID_CS);
777}
778
779/**
780 * spi_new_device - instantiate one new SPI device
781 * @ctlr: Controller to which device is connected
782 * @chip: Describes the SPI device
783 * Context: can sleep
784 *
785 * On typical mainboards, this is purely internal; and it's not needed
786 * after board init creates the hard-wired devices. Some development
787 * platforms may not be able to use spi_register_board_info though, and
788 * this is exported so that for example a USB or parport based adapter
789 * driver could add devices (which it would learn about out-of-band).
790 *
791 * Return: the new device, or NULL.
792 */
793struct spi_device *spi_new_device(struct spi_controller *ctlr,
794 struct spi_board_info *chip)
795{
796 struct spi_device *proxy;
797 int status;
798
799 /*
800 * NOTE: caller did any chip->bus_num checks necessary.
801 *
802 * Also, unless we change the return value convention to use
803 * error-or-pointer (not NULL-or-pointer), troubleshootability
804 * suggests syslogged diagnostics are best here (ugh).
805 */
806
807 proxy = spi_alloc_device(ctlr);
808 if (!proxy)
809 return NULL;
810
811 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
812
813 /* Use provided chip-select for proxy device */
814 spi_set_all_cs_unused(proxy);
815 spi_set_chipselect(proxy, 0, chip->chip_select);
816
817 proxy->max_speed_hz = chip->max_speed_hz;
818 proxy->mode = chip->mode;
819 proxy->irq = chip->irq;
820 strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
821 proxy->dev.platform_data = (void *) chip->platform_data;
822 proxy->controller_data = chip->controller_data;
823 proxy->controller_state = NULL;
824 /*
825 * spi->chip_select[i] gives the physical CS for logical CS i, and an
826 * active logical CS is represented by setting the ith bit in
827 * spi->cs_index_mask. So, for example, if spi->cs_index_mask = 0x01 then
828 * logical CS 0 is active and spi->chip_select[0] gives its physical CS.
829 * By default spi->chip_select[0] holds the physical CS number, so set
830 * spi->cs_index_mask to 0x01.
831 */
832 proxy->cs_index_mask = 0x01;
833
834 if (chip->swnode) {
835 status = device_add_software_node(&proxy->dev, chip->swnode);
836 if (status) {
837 dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
838 chip->modalias, status);
839 goto err_dev_put;
840 }
841 }
842
843 status = spi_add_device(proxy);
844 if (status < 0)
845 goto err_dev_put;
846
847 return proxy;
848
849err_dev_put:
850 device_remove_software_node(&proxy->dev);
851 spi_dev_put(proxy);
852 return NULL;
853}
854EXPORT_SYMBOL_GPL(spi_new_device);
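
/*
 * Usage sketch (hypothetical "qux" adapter driver): instantiate a chip
 * that was discovered out-of-band, e.g. behind a USB bridge.
 *
 *	static struct spi_device *qux_attach_chip(struct spi_controller *ctlr)
 *	{
 *		struct spi_board_info chip = {
 *			.modalias	= "qux",
 *			.max_speed_hz	= 5000000,
 *			.mode		= SPI_MODE_0,
 *			.chip_select	= 0,
 *		};
 *
 *		return spi_new_device(ctlr, &chip);
 *	}
 */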
855
856/**
857 * spi_unregister_device - unregister a single SPI device
858 * @spi: spi_device to unregister
859 *
860 * Start making the passed SPI device vanish. Normally this would be handled
861 * by spi_unregister_controller().
862 */
863void spi_unregister_device(struct spi_device *spi)
864{
865 if (!spi)
866 return;
867
868 if (spi->dev.of_node) {
869 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
870 of_node_put(spi->dev.of_node);
871 }
872 if (ACPI_COMPANION(&spi->dev))
873 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
874 device_remove_software_node(&spi->dev);
875 device_del(&spi->dev);
876 spi_cleanup(spi);
877 put_device(&spi->dev);
878}
879EXPORT_SYMBOL_GPL(spi_unregister_device);
880
881static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
882 struct spi_board_info *bi)
883{
884 struct spi_device *dev;
885
886 if (ctlr->bus_num != bi->bus_num)
887 return;
888
889 dev = spi_new_device(ctlr, bi);
890 if (!dev)
891 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
892 bi->modalias);
893}
894
895/**
896 * spi_register_board_info - register SPI devices for a given board
897 * @info: array of chip descriptors
898 * @n: how many descriptors are provided
899 * Context: can sleep
900 *
901 * Board-specific early init code calls this (probably during arch_initcall)
902 * with segments of the SPI device table. Any device nodes are created later,
903 * after the relevant parent SPI controller (bus_num) is defined. We keep
904 * this table of devices forever, so that reloading a controller driver will
905 * not make Linux forget about these hard-wired devices.
906 *
907 * Other code can also call this, e.g. a particular add-on board might provide
908 * SPI devices through its expansion connector, so code initializing that board
909 * would naturally declare its SPI devices.
910 *
911 * The board info passed can safely be __initdata ... but be careful of
912 * any embedded pointers (platform_data, etc.); they're copied as-is.
913 *
914 * Return: zero on success, else a negative error code.
915 */
916int spi_register_board_info(struct spi_board_info const *info, unsigned n)
917{
918 struct boardinfo *bi;
919 int i;
920
921 if (!n)
922 return 0;
923
924 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
925 if (!bi)
926 return -ENOMEM;
927
928 for (i = 0; i < n; i++, bi++, info++) {
929 struct spi_controller *ctlr;
930
931 memcpy(&bi->board_info, info, sizeof(*info));
932
933 mutex_lock(&board_lock);
934 list_add_tail(&bi->list, &board_list);
935 list_for_each_entry(ctlr, &spi_controller_list, list)
936 spi_match_controller_to_boardinfo(ctlr,
937 &bi->board_info);
938 mutex_unlock(&board_lock);
939 }
940
941 return 0;
942}
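
/*
 * A board-file sketch (hypothetical values): the array may live in
 * __initdata because spi_register_board_info() copies it, but any embedded
 * platform_data pointers must stay valid.
 *
 *	static struct spi_board_info garply_board_info[] __initdata = {
 *		{
 *			.modalias	= "ads7846",
 *			.max_speed_hz	= 2000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	static int __init garply_board_init(void)
 *	{
 *		return spi_register_board_info(garply_board_info,
 *					       ARRAY_SIZE(garply_board_info));
 *	}
 *	arch_initcall(garply_board_init);
 */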
943
944/*-------------------------------------------------------------------------*/
945
946/* Core methods for SPI resource management */
947
948/**
949 * spi_res_alloc - allocate a spi resource that is life-cycle managed
950 * during the processing of a spi_message while using
951 * spi_transfer_one
952 * @spi: the SPI device for which we allocate memory
953 * @release: the release code to execute for this resource
954 * @size: size to alloc and return
955 * @gfp: GFP allocation flags
956 *
957 * Return: the pointer to the allocated data
958 *
959 * This may get enhanced in the future to allocate from a memory pool
960 * of the @spi_device or @spi_controller to avoid repeated allocations.
961 */
962static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
963 size_t size, gfp_t gfp)
964{
965 struct spi_res *sres;
966
967 sres = kzalloc(sizeof(*sres) + size, gfp);
968 if (!sres)
969 return NULL;
970
971 INIT_LIST_HEAD(&sres->entry);
972 sres->release = release;
973
974 return sres->data;
975}
976
977/**
978 * spi_res_free - free an SPI resource
979 * @res: pointer to the custom data of a resource
980 */
981static void spi_res_free(void *res)
982{
983 struct spi_res *sres = container_of(res, struct spi_res, data);
984
985 if (!res)
986 return;
987
988 WARN_ON(!list_empty(&sres->entry));
989 kfree(sres);
990}
991
992/**
993 * spi_res_add - add a spi_res to the spi_message
994 * @message: the SPI message
995 * @res: the spi_resource
996 */
997static void spi_res_add(struct spi_message *message, void *res)
998{
999 struct spi_res *sres = container_of(res, struct spi_res, data);
1000
1001 WARN_ON(!list_empty(&sres->entry));
1002 list_add_tail(&sres->entry, &message->resources);
1003}
1004
1005/**
1006 * spi_res_release - release all SPI resources for this message
1007 * @ctlr: the @spi_controller
1008 * @message: the @spi_message
1009 */
1010static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
1011{
1012 struct spi_res *res, *tmp;
1013
1014 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
1015 if (res->release)
1016 res->release(ctlr, message, res->data);
1017
1018 list_del(&res->entry);
1019
1020 kfree(res);
1021 }
1022}
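
/*
 * Usage sketch (hypothetical helper): core code that temporarily patches a
 * transfer can tie the undo step to the message's lifetime, so it runs in
 * spi_res_release() no matter how the message completes.
 *
 *	struct quux_fixup {
 *		struct spi_transfer *xfer;
 *		u32 saved_speed;
 *	};
 *
 *	static void quux_fixup_release(struct spi_controller *ctlr,
 *				       struct spi_message *msg, void *res)
 *	{
 *		struct quux_fixup *fix = res;
 *
 *		fix->xfer->speed_hz = fix->saved_speed;
 *	}
 *
 *	static int quux_cap_speed(struct spi_message *msg,
 *				  struct spi_transfer *xfer, u32 max_hz)
 *	{
 *		struct quux_fixup *fix;
 *
 *		fix = spi_res_alloc(msg->spi, quux_fixup_release,
 *				    sizeof(*fix), GFP_KERNEL);
 *		if (!fix)
 *			return -ENOMEM;
 *
 *		fix->xfer = xfer;
 *		fix->saved_speed = xfer->speed_hz;
 *		xfer->speed_hz = min(xfer->speed_hz, max_hz);
 *		spi_res_add(msg, fix);
 *		return 0;
 *	}
 */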
1023
1024/*-------------------------------------------------------------------------*/

1025static inline bool spi_is_last_cs(struct spi_device *spi)
1026{
1027 u8 idx;
1028 bool last = false;
1029
1030 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
1031 if (spi->cs_index_mask & BIT(idx)) {
1032 if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
1033 last = true;
1034 }
1035 }
1036 return last;
1037}
1038
1040static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
1041{
1042 bool activate = enable;
1043 u8 idx;
1044
1045 /*
1046 * Avoid calling into the driver (or doing delays) if the chip select
1047 * isn't actually changing from the last time this was called.
1048 */
1049 if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1050 spi_is_last_cs(spi)) ||
1051 (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1052 !spi_is_last_cs(spi))) &&
1053 (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
1054 return;
1055
1056 trace_spi_set_cs(spi, activate);
1057
1058 spi->controller->last_cs_index_mask = spi->cs_index_mask;
1059 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
1060 spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, idx) : SPI_INVALID_CS;
1061 spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
1062
1063 if (spi->mode & SPI_CS_HIGH)
1064 enable = !enable;
1065
1066 /*
1067 * Handle chip select delays for GPIO based CS or controllers without
1068 * programmable chip select timing.
1069 */
1070 if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
1071 spi_delay_exec(&spi->cs_hold, NULL);
1072
1073 if (spi_is_csgpiod(spi)) {
1074 if (!(spi->mode & SPI_NO_CS)) {
1075 /*
1076 * Historically ACPI has no means of the GPIO polarity and
1077 * thus the SPISerialBus() resource defines it on the per-chip
1078 * basis. In order to avoid a chain of negations, the GPIO
1079 * polarity is considered being Active High. Even for the cases
1080 * when _DSD() is involved (in the updated versions of ACPI)
1081 * the GPIO CS polarity must be defined Active High to avoid
1082 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
1083 * into account.
1084 */
1085 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
1086 if ((spi->cs_index_mask & BIT(idx)) && spi_get_csgpiod(spi, idx)) {
1087 if (has_acpi_companion(&spi->dev))
1088 gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
1089 !enable);
1090 else
1091 /* Polarity handled by GPIO library */
1092 gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
1093 activate);
1094
1095 if (activate)
1096 spi_delay_exec(&spi->cs_setup, NULL);
1097 else
1098 spi_delay_exec(&spi->cs_inactive, NULL);
1099 }
1100 }
1101 }
1102 /* Some SPI masters need both GPIO CS & slave_select */
1103 if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1104 spi->controller->set_cs)
1105 spi->controller->set_cs(spi, !enable);
1106 } else if (spi->controller->set_cs) {
1107 spi->controller->set_cs(spi, !enable);
1108 }
1109
1110 if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
1111 if (activate)
1112 spi_delay_exec(&spi->cs_setup, NULL);
1113 else
1114 spi_delay_exec(&spi->cs_inactive, NULL);
1115 }
1116}
1117
1118#ifdef CONFIG_HAS_DMA
1119static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
1120 struct sg_table *sgt, void *buf, size_t len,
1121 enum dma_data_direction dir, unsigned long attrs)
1122{
1123 const bool vmalloced_buf = is_vmalloc_addr(buf);
1124 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1125#ifdef CONFIG_HIGHMEM
1126 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
1127 (unsigned long)buf < (PKMAP_BASE +
1128 (LAST_PKMAP * PAGE_SIZE)));
1129#else
1130 const bool kmap_buf = false;
1131#endif
1132 int desc_len;
1133 int sgs;
1134 struct page *vm_page;
1135 struct scatterlist *sg;
1136 void *sg_buf;
1137 size_t min;
1138 int i, ret;
1139
1140 if (vmalloced_buf || kmap_buf) {
1141 desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
1142 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
1143 } else if (virt_addr_valid(buf)) {
1144 desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1145 sgs = DIV_ROUND_UP(len, desc_len);
1146 } else {
1147 return -EINVAL;
1148 }
1149
1150 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
1151 if (ret != 0)
1152 return ret;
1153
1154 sg = &sgt->sgl[0];
1155 for (i = 0; i < sgs; i++) {
1156
1157 if (vmalloced_buf || kmap_buf) {
1158 /*
1159 * Next scatterlist entry size is the minimum between
1160 * the desc_len and the remaining buffer length that
1161 * fits in a page.
1162 */
1163 min = min_t(size_t, desc_len,
1164 min_t(size_t, len,
1165 PAGE_SIZE - offset_in_page(buf)));
1166 if (vmalloced_buf)
1167 vm_page = vmalloc_to_page(buf);
1168 else
1169 vm_page = kmap_to_page(buf);
1170 if (!vm_page) {
1171 sg_free_table(sgt);
1172 return -ENOMEM;
1173 }
1174 sg_set_page(sg, vm_page,
1175 min, offset_in_page(buf));
1176 } else {
1177 min = min_t(size_t, len, desc_len);
1178 sg_buf = buf;
1179 sg_set_buf(sg, sg_buf, min);
1180 }
1181
1182 buf += min;
1183 len -= min;
1184 sg = sg_next(sg);
1185 }
1186
1187 ret = dma_map_sgtable(dev, sgt, dir, attrs);
1188 if (ret < 0) {
1189 sg_free_table(sgt);
1190 return ret;
1191 }
1192
1193 return 0;
1194}
1195
1196int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
1197 struct sg_table *sgt, void *buf, size_t len,
1198 enum dma_data_direction dir)
1199{
1200 return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1201}
1202
1203static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
1204 struct device *dev, struct sg_table *sgt,
1205 enum dma_data_direction dir,
1206 unsigned long attrs)
1207{
1208 if (sgt->orig_nents) {
1209 dma_unmap_sgtable(dev, sgt, dir, attrs);
1210 sg_free_table(sgt);
1211 sgt->orig_nents = 0;
1212 sgt->nents = 0;
1213 }
1214}
1215
1216void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1217 struct sg_table *sgt, enum dma_data_direction dir)
1218{
1219 spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1220}
1221
1222static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1223{
1224 struct device *tx_dev, *rx_dev;
1225 struct spi_transfer *xfer;
1226 int ret;
1227
1228 if (!ctlr->can_dma)
1229 return 0;
1230
1231 if (ctlr->dma_tx)
1232 tx_dev = ctlr->dma_tx->device->dev;
1233 else if (ctlr->dma_map_dev)
1234 tx_dev = ctlr->dma_map_dev;
1235 else
1236 tx_dev = ctlr->dev.parent;
1237
1238 if (ctlr->dma_rx)
1239 rx_dev = ctlr->dma_rx->device->dev;
1240 else if (ctlr->dma_map_dev)
1241 rx_dev = ctlr->dma_map_dev;
1242 else
1243 rx_dev = ctlr->dev.parent;
1244
1245 ret = -ENOMSG;
1246 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1247 /* The sync is done before each transfer. */
1248 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1249
1250 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1251 continue;
1252
1253 if (xfer->tx_buf != NULL) {
1254 ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1255 (void *)xfer->tx_buf,
1256 xfer->len, DMA_TO_DEVICE,
1257 attrs);
1258 if (ret != 0)
1259 return ret;
1260 }
1261
1262 if (xfer->rx_buf != NULL) {
1263 ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1264 xfer->rx_buf, xfer->len,
1265 DMA_FROM_DEVICE, attrs);
1266 if (ret != 0) {
1267 spi_unmap_buf_attrs(ctlr, tx_dev,
1268 &xfer->tx_sg, DMA_TO_DEVICE,
1269 attrs);
1270
1271 return ret;
1272 }
1273 }
1274 }
1275 /* No transfer has been mapped, bail out with success */
1276 if (ret)
1277 return 0;
1278
1279 ctlr->cur_rx_dma_dev = rx_dev;
1280 ctlr->cur_tx_dma_dev = tx_dev;
1281 ctlr->cur_msg_mapped = true;
1282
1283 return 0;
1284}
1285
1286static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1287{
1288 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1289 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1290 struct spi_transfer *xfer;
1291
1292 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
1293 return 0;
1294
1295 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1296 /* The sync has already been done after each transfer. */
1297 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1298
1299 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1300 continue;
1301
1302 spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1303 DMA_FROM_DEVICE, attrs);
1304 spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1305 DMA_TO_DEVICE, attrs);
1306 }
1307
1308 ctlr->cur_msg_mapped = false;
1309
1310 return 0;
1311}
1312
1313static void spi_dma_sync_for_device(struct spi_controller *ctlr,
1314 struct spi_transfer *xfer)
1315{
1316 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1317 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1318
1319 if (!ctlr->cur_msg_mapped)
1320 return;
1321
1322 if (xfer->tx_sg.orig_nents)
1323 dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1324 if (xfer->rx_sg.orig_nents)
1325 dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1326}
1327
1328static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
1329 struct spi_transfer *xfer)
1330{
1331 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1332 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1333
1334 if (!ctlr->cur_msg_mapped)
1335 return;
1336
1337 if (xfer->rx_sg.orig_nents)
1338 dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1339 if (xfer->tx_sg.orig_nents)
1340 dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1341}
1342#else /* !CONFIG_HAS_DMA */
1343static inline int __spi_map_msg(struct spi_controller *ctlr,
1344 struct spi_message *msg)
1345{
1346 return 0;
1347}
1348
1349static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1350 struct spi_message *msg)
1351{
1352 return 0;
1353}
1354
1355static void spi_dma_sync_for_device(struct spi_controller *ctrl,
1356 struct spi_transfer *xfer)
1357{
1358}
1359
1360static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
1361 struct spi_transfer *xfer)
1362{
1363}
1364#endif /* !CONFIG_HAS_DMA */
1365
1366static inline int spi_unmap_msg(struct spi_controller *ctlr,
1367 struct spi_message *msg)
1368{
1369 struct spi_transfer *xfer;
1370
1371 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1372 /*
1373 * Restore tx_buf and rx_buf to their original NULL values if
1374 * spi_map_msg() replaced them with the controller's dummy buffers.
1375 */
1376 if (xfer->tx_buf == ctlr->dummy_tx)
1377 xfer->tx_buf = NULL;
1378 if (xfer->rx_buf == ctlr->dummy_rx)
1379 xfer->rx_buf = NULL;
1380 }
1381
1382 return __spi_unmap_msg(ctlr, msg);
1383}
1384
1385static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1386{
1387 struct spi_transfer *xfer;
1388 void *tmp;
1389 unsigned int max_tx, max_rx;
1390
1391 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1392 && !(msg->spi->mode & SPI_3WIRE)) {
1393 max_tx = 0;
1394 max_rx = 0;
1395
1396 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1397 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1398 !xfer->tx_buf)
1399 max_tx = max(xfer->len, max_tx);
1400 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1401 !xfer->rx_buf)
1402 max_rx = max(xfer->len, max_rx);
1403 }
1404
1405 if (max_tx) {
1406 tmp = krealloc(ctlr->dummy_tx, max_tx,
1407 GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1408 if (!tmp)
1409 return -ENOMEM;
1410 ctlr->dummy_tx = tmp;
1411 }
1412
1413 if (max_rx) {
1414 tmp = krealloc(ctlr->dummy_rx, max_rx,
1415 GFP_KERNEL | GFP_DMA);
1416 if (!tmp)
1417 return -ENOMEM;
1418 ctlr->dummy_rx = tmp;
1419 }
1420
1421 if (max_tx || max_rx) {
1422 list_for_each_entry(xfer, &msg->transfers,
1423 transfer_list) {
1424 if (!xfer->len)
1425 continue;
1426 if (!xfer->tx_buf)
1427 xfer->tx_buf = ctlr->dummy_tx;
1428 if (!xfer->rx_buf)
1429 xfer->rx_buf = ctlr->dummy_rx;
1430 }
1431 }
1432 }
1433
1434 return __spi_map_msg(ctlr, msg);
1435}
1436
1437static int spi_transfer_wait(struct spi_controller *ctlr,
1438 struct spi_message *msg,
1439 struct spi_transfer *xfer)
1440{
1441 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1442 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1443 u32 speed_hz = xfer->speed_hz;
1444 unsigned long long ms;
1445
1446 if (spi_controller_is_slave(ctlr)) {
1447 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1448 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1449 return -EINTR;
1450 }
1451 } else {
1452 if (!speed_hz)
1453 speed_hz = 100000;
1454
1455 /*
1456 * For each byte we wait for 8 cycles of the SPI clock.
1457 * Since speed is defined in Hz and we want milliseconds,
1458 * use respective multiplier, but before the division,
1459 * otherwise we may get 0 for short transfers.
1460 */
1461 ms = 8LL * MSEC_PER_SEC * xfer->len;
1462 do_div(ms, speed_hz);
1463
1464 /*
1465 * Increase it twice and add 200 ms tolerance, use
1466 * predefined maximum in case of overflow.
1467 */
1468 ms += ms + 200;
1469 if (ms > UINT_MAX)
1470 ms = UINT_MAX;
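
 /*
 * Worked example: 100 bytes at 1 MHz gives 8 * 1000 * 100 / 1000000,
 * which do_div() truncates to 0 ms; the doubling plus tolerance then
 * leaves ms = 0 + 0 + 200 = 200 ms as the whole budget.
 */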
1471
1472 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1473 msecs_to_jiffies(ms));
1474
1475 if (ms == 0) {
1476 SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1477 SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1478 dev_err(&msg->spi->dev,
1479 "SPI transfer timed out\n");
1480 return -ETIMEDOUT;
1481 }
1482
1483 if (xfer->error & SPI_TRANS_FAIL_IO)
1484 return -EIO;
1485 }
1486
1487 return 0;
1488}
1489
1490static void _spi_transfer_delay_ns(u32 ns)
1491{
1492 if (!ns)
1493 return;
1494 if (ns <= NSEC_PER_USEC) {
1495 ndelay(ns);
1496 } else {
1497 u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1498
1499 if (us <= 10)
1500 udelay(us);
1501 else
1502 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1503 }
1504}
1505
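/*
 * Convert a spi_delay to nanoseconds. Worked example for the SCK unit:
 * {.value = 4, .unit = SPI_DELAY_UNIT_SCK} on a transfer with an effective
 * rate of 1 MHz gives DIV_ROUND_UP(NSEC_PER_SEC, 1000000) = 1000 ns per
 * clock cycle, hence 4000 ns.
 */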
1506int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1507{
1508 u32 delay = _delay->value;
1509 u32 unit = _delay->unit;
1510 u32 hz;
1511
1512 if (!delay)
1513 return 0;
1514
1515 switch (unit) {
1516 case SPI_DELAY_UNIT_USECS:
1517 delay *= NSEC_PER_USEC;
1518 break;
1519 case SPI_DELAY_UNIT_NSECS:
1520 /* Nothing to do here */
1521 break;
1522 case SPI_DELAY_UNIT_SCK:
1523 /* Clock cycles need to be obtained from spi_transfer */
1524 if (!xfer)
1525 return -EINVAL;
1526 /*
1527 * If there is unknown effective speed, approximate it
1528 * by underestimating with half of the requested Hz.
1529 */
1530 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1531 if (!hz)
1532 return -EINVAL;
1533
1534 /* Convert delay to nanoseconds */
1535 delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1536 break;
1537 default:
1538 return -EINVAL;
1539 }
1540
1541 return delay;
1542}
1543EXPORT_SYMBOL_GPL(spi_delay_to_ns);
1544
1545int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1546{
1547 int delay;
1548
1549 might_sleep();
1550
1551 if (!_delay)
1552 return -EINVAL;
1553
1554 delay = spi_delay_to_ns(_delay, xfer);
1555 if (delay < 0)
1556 return delay;
1557
1558 _spi_transfer_delay_ns(delay);
1559
1560 return 0;
1561}
1562EXPORT_SYMBOL_GPL(spi_delay_exec);
1563
1564static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1565 struct spi_transfer *xfer)
1566{
1567 u32 default_delay_ns = 10 * NSEC_PER_USEC;
1568 u32 delay = xfer->cs_change_delay.value;
1569 u32 unit = xfer->cs_change_delay.unit;
1570 int ret;
1571
1572 /* Return early on "fast" mode - for everything but USECS */
1573 if (!delay) {
1574 if (unit == SPI_DELAY_UNIT_USECS)
1575 _spi_transfer_delay_ns(default_delay_ns);
1576 return;
1577 }
1578
1579 ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1580 if (ret) {
1581 dev_err_once(&msg->spi->dev,
1582 "Use of unsupported delay unit %i, using default of %luus\n",
1583 unit, default_delay_ns / NSEC_PER_USEC);
1584 _spi_transfer_delay_ns(default_delay_ns);
1585 }
1586}
1587
1588void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
1589 struct spi_transfer *xfer)
1590{
1591 _spi_transfer_cs_change_delay(msg, xfer);
1592}
1593EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
1594
1595/*
1596 * spi_transfer_one_message - Default implementation of transfer_one_message()
1597 *
1598 * This is a standard implementation of transfer_one_message() for
1599 * drivers which implement a transfer_one() operation. It provides
1600 * standard handling of delays and chip select management.
1601 */
1602static int spi_transfer_one_message(struct spi_controller *ctlr,
1603 struct spi_message *msg)
1604{
1605 struct spi_transfer *xfer;
1606 bool keep_cs = false;
1607 int ret = 0;
1608 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1609 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1610
1611 xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1612 spi_set_cs(msg->spi, !xfer->cs_off, false);
1613
1614 SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1615 SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1616
1617 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1618 trace_spi_transfer_start(msg, xfer);
1619
1620 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1621 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1622
1623 if (!ctlr->ptp_sts_supported) {
1624 xfer->ptp_sts_word_pre = 0;
1625 ptp_read_system_prets(xfer->ptp_sts);
1626 }
1627
1628 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1629 reinit_completion(&ctlr->xfer_completion);
1630
1631fallback_pio:
1632 spi_dma_sync_for_device(ctlr, xfer);
1633 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1634 if (ret < 0) {
1635 spi_dma_sync_for_cpu(ctlr, xfer);
1636
1637 if (ctlr->cur_msg_mapped &&
1638 (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1639 __spi_unmap_msg(ctlr, msg);
1640 ctlr->fallback = true;
1641 xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1642 goto fallback_pio;
1643 }
1644
1645 SPI_STATISTICS_INCREMENT_FIELD(statm,
1646 errors);
1647 SPI_STATISTICS_INCREMENT_FIELD(stats,
1648 errors);
1649 dev_err(&msg->spi->dev,
1650 "SPI transfer failed: %d\n", ret);
1651 goto out;
1652 }
1653
1654 if (ret > 0) {
1655 ret = spi_transfer_wait(ctlr, msg, xfer);
1656 if (ret < 0)
1657 msg->status = ret;
1658 }
1659
1660 spi_dma_sync_for_cpu(ctlr, xfer);
1661 } else {
1662 if (xfer->len)
1663 dev_err(&msg->spi->dev,
1664 "Bufferless transfer has length %u\n",
1665 xfer->len);
1666 }
1667
1668 if (!ctlr->ptp_sts_supported) {
1669 ptp_read_system_postts(xfer->ptp_sts);
1670 xfer->ptp_sts_word_post = xfer->len;
1671 }
1672
1673 trace_spi_transfer_stop(msg, xfer);
1674
1675 if (msg->status != -EINPROGRESS)
1676 goto out;
1677
1678 spi_transfer_delay_exec(xfer);
1679
1680 if (xfer->cs_change) {
1681 if (list_is_last(&xfer->transfer_list,
1682 &msg->transfers)) {
1683 keep_cs = true;
1684 } else {
1685 if (!xfer->cs_off)
1686 spi_set_cs(msg->spi, false, false);
1687 _spi_transfer_cs_change_delay(msg, xfer);
1688 if (!list_next_entry(xfer, transfer_list)->cs_off)
1689 spi_set_cs(msg->spi, true, false);
1690 }
1691 } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1692 xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1693 spi_set_cs(msg->spi, xfer->cs_off, false);
1694 }
1695
1696 msg->actual_length += xfer->len;
1697 }
1698
1699out:
1700 if (ret != 0 || !keep_cs)
1701 spi_set_cs(msg->spi, false, false);
1702
1703 if (msg->status == -EINPROGRESS)
1704 msg->status = ret;
1705
1706 if (msg->status && ctlr->handle_err)
1707 ctlr->handle_err(ctlr, msg);
1708
1709 spi_finalize_current_message(ctlr);
1710
1711 return ret;
1712}
1713
1714/**
1715 * spi_finalize_current_transfer - report completion of a transfer
1716 * @ctlr: the controller reporting completion
1717 *
1718 * Called by SPI drivers using the core transfer_one_message()
1719 * implementation to notify it that the current interrupt driven
1720 * transfer has finished and the next one may be scheduled.
1721 */
1722void spi_finalize_current_transfer(struct spi_controller *ctlr)
1723{
1724 complete(&ctlr->xfer_completion);
1725}
1726EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
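
/*
 * Sketch (hypothetical "corge" driver): a transfer_one() that kicked off an
 * interrupt-driven transfer returns a positive value, and its IRQ handler,
 * after draining FIFOs and acking the hardware, reports completion so
 * spi_transfer_wait() stops waiting.
 *
 *	static irqreturn_t corge_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */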
1727
1728static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1729{
1730 if (ctlr->auto_runtime_pm) {
1731 pm_runtime_mark_last_busy(ctlr->dev.parent);
1732 pm_runtime_put_autosuspend(ctlr->dev.parent);
1733 }
1734}
1735
1736static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1737 struct spi_message *msg, bool was_busy)
1738{
1739 struct spi_transfer *xfer;
1740 int ret;
1741
1742 if (!was_busy && ctlr->auto_runtime_pm) {
1743 ret = pm_runtime_get_sync(ctlr->dev.parent);
1744 if (ret < 0) {
1745 pm_runtime_put_noidle(ctlr->dev.parent);
1746 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1747 ret);
1748
1749 msg->status = ret;
1750 spi_finalize_current_message(ctlr);
1751
1752 return ret;
1753 }
1754 }
1755
1756 if (!was_busy)
1757 trace_spi_controller_busy(ctlr);
1758
1759 if (!was_busy && ctlr->prepare_transfer_hardware) {
1760 ret = ctlr->prepare_transfer_hardware(ctlr);
1761 if (ret) {
1762 dev_err(&ctlr->dev,
1763 "failed to prepare transfer hardware: %d\n",
1764 ret);
1765
1766 if (ctlr->auto_runtime_pm)
1767 pm_runtime_put(ctlr->dev.parent);
1768
1769 msg->status = ret;
1770 spi_finalize_current_message(ctlr);
1771
1772 return ret;
1773 }
1774 }
1775
1776 trace_spi_message_start(msg);
1777
1778 if (ctlr->prepare_message) {
1779 ret = ctlr->prepare_message(ctlr, msg);
1780 if (ret) {
1781 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1782 ret);
1783 msg->status = ret;
1784 spi_finalize_current_message(ctlr);
1785 return ret;
1786 }
1787 msg->prepared = true;
1788 }
1789
1790 ret = spi_map_msg(ctlr, msg);
1791 if (ret) {
1792 msg->status = ret;
1793 spi_finalize_current_message(ctlr);
1794 return ret;
1795 }
1796
1797 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1798 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1799 xfer->ptp_sts_word_pre = 0;
1800 ptp_read_system_prets(xfer->ptp_sts);
1801 }
1802 }
1803
1804 /*
1805 * A driver's implementation of transfer_one_message() must arrange for
1806 * spi_finalize_current_message() to get called. Most drivers will do
1807 * this in the calling context, but some don't. For those cases, a
1808 * completion is used to guarantee that this function does not return
1809 * until spi_finalize_current_message() is done accessing
1810 * ctlr->cur_msg.
1811 * The following two flags make it possible to opportunistically skip
1812 * the completion, since its use involves expensive spin locks.
1813 * In case of a race with the context that calls
1814 * spi_finalize_current_message() the completion will always be used,
1815 * due to strict ordering of these flags using barriers.
1816 */
1817 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1818 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1819 reinit_completion(&ctlr->cur_msg_completion);
1820 smp_wmb(); /* Make these available to spi_finalize_current_message() */
1821
1822 ret = ctlr->transfer_one_message(ctlr, msg);
1823 if (ret) {
1824 dev_err(&ctlr->dev,
1825 "failed to transfer one message from queue\n");
1826 return ret;
1827 }
1828
1829 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1830 smp_mb(); /* See spi_finalize_current_message()... */
1831 if (READ_ONCE(ctlr->cur_msg_incomplete))
1832 wait_for_completion(&ctlr->cur_msg_completion);
1833
1834 return 0;
1835}
1836
1837/**
1838 * __spi_pump_messages - function which processes SPI message queue
1839 * @ctlr: controller to process queue for
1840 * @in_kthread: true if we are in the context of the message pump thread
1841 *
1842 * This function checks if there is any SPI message in the queue that
1843 * needs processing and if so call out to the driver to initialize hardware
1844 * and transfer each message.
1845 *
1846 * Note that it is called both from the kthread itself and also from
1847 * inside spi_sync(); the queue extraction handling at the top of the
1848 * function should deal with this safely.
1849 */
1850static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1851{
1852 struct spi_message *msg;
1853 bool was_busy = false;
1854 unsigned long flags;
1855 int ret;
1856
1857 /* Take the I/O mutex */
1858 mutex_lock(&ctlr->io_mutex);
1859
1860 /* Lock queue */
1861 spin_lock_irqsave(&ctlr->queue_lock, flags);
1862
1863 /* Make sure we are not already running a message */
1864 if (ctlr->cur_msg)
1865 goto out_unlock;
1866
1867 /* Check if the queue is idle */
1868 if (list_empty(&ctlr->queue) || !ctlr->running) {
1869 if (!ctlr->busy)
1870 goto out_unlock;
1871
1872 /* Defer any non-atomic teardown to the thread */
1873 if (!in_kthread) {
1874 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1875 !ctlr->unprepare_transfer_hardware) {
1876 spi_idle_runtime_pm(ctlr);
1877 ctlr->busy = false;
1878 ctlr->queue_empty = true;
1879 trace_spi_controller_idle(ctlr);
1880 } else {
1881 kthread_queue_work(ctlr->kworker,
1882 &ctlr->pump_messages);
1883 }
1884 goto out_unlock;
1885 }
1886
1887 ctlr->busy = false;
1888 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1889
1890 kfree(ctlr->dummy_rx);
1891 ctlr->dummy_rx = NULL;
1892 kfree(ctlr->dummy_tx);
1893 ctlr->dummy_tx = NULL;
1894 if (ctlr->unprepare_transfer_hardware &&
1895 ctlr->unprepare_transfer_hardware(ctlr))
1896 dev_err(&ctlr->dev,
1897 "failed to unprepare transfer hardware\n");
1898 spi_idle_runtime_pm(ctlr);
1899 trace_spi_controller_idle(ctlr);
1900
1901 spin_lock_irqsave(&ctlr->queue_lock, flags);
1902 ctlr->queue_empty = true;
1903 goto out_unlock;
1904 }
1905
1906 /* Extract head of queue */
1907 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1908 ctlr->cur_msg = msg;
1909
1910 list_del_init(&msg->queue);
1911 if (ctlr->busy)
1912 was_busy = true;
1913 else
1914 ctlr->busy = true;
1915 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1916
1917 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1918 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1919
1920 ctlr->cur_msg = NULL;
1921 ctlr->fallback = false;
1922
1923 mutex_unlock(&ctlr->io_mutex);
1924
1925 /* Prod the scheduler in case transfer_one() was busy waiting */
1926 if (!ret)
1927 cond_resched();
1928 return;
1929
1930out_unlock:
1931 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1932 mutex_unlock(&ctlr->io_mutex);
1933}
1934
1935/**
1936 * spi_pump_messages - kthread work function which processes spi message queue
1937 * @work: pointer to kthread work struct contained in the controller struct
1938 */
1939static void spi_pump_messages(struct kthread_work *work)
1940{
1941 struct spi_controller *ctlr =
1942 container_of(work, struct spi_controller, pump_messages);
1943
1944 __spi_pump_messages(ctlr, true);
1945}
1946
1947/**
1948 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1949 * @ctlr: Pointer to the spi_controller structure of the driver
1950 * @xfer: Pointer to the transfer being timestamped
1951 * @progress: How many words (not bytes) have been transferred so far
1952 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1953 * transfer, for less jitter in time measurement. Only compatible
1954 * with PIO drivers. If true, must follow up with
1955 * spi_take_timestamp_post(), otherwise the system will crash.
1956 * WARNING: for fully predictable results, the CPU frequency must
1957 * also be under control (governor).
1958 *
1959 * This is a helper for drivers to collect the beginning of the TX timestamp
1960 * for the requested byte from the SPI transfer. The frequency with which this
1961 * function must be called (once per word, once for the whole transfer, once
1962 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1963 * greater than or equal to the requested byte at the time of the call. The
1964 * timestamp is only taken once, at the first such call. It is assumed that
1965 * the driver advances its @tx buffer pointer monotonically.
1966 */
1967void spi_take_timestamp_pre(struct spi_controller *ctlr,
1968 struct spi_transfer *xfer,
1969 size_t progress, bool irqs_off)
1970{
1971 if (!xfer->ptp_sts)
1972 return;
1973
1974 if (xfer->timestamped)
1975 return;
1976
1977 if (progress > xfer->ptp_sts_word_pre)
1978 return;
1979
1980 /* Capture the resolution of the timestamp */
1981 xfer->ptp_sts_word_pre = progress;
1982
1983 if (irqs_off) {
1984 local_irq_save(ctlr->irq_flags);
1985 preempt_disable();
1986 }
1987
1988 ptp_read_system_prets(xfer->ptp_sts);
1989}
1990EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1991
1992/**
1993 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1994 * @ctlr: Pointer to the spi_controller structure of the driver
1995 * @xfer: Pointer to the transfer being timestamped
1996 * @progress: How many words (not bytes) have been transferred so far
1997 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1998 *
1999 * This is a helper for drivers to collect the end of the TX timestamp for
2000 * the requested byte from the SPI transfer. Can be called with an arbitrary
2001 * frequency: only the first call where @tx exceeds or is equal to the
2002 * requested word will be timestamped.
2003 */
2004void spi_take_timestamp_post(struct spi_controller *ctlr,
2005 struct spi_transfer *xfer,
2006 size_t progress, bool irqs_off)
2007{
2008 if (!xfer->ptp_sts)
2009 return;
2010
2011 if (xfer->timestamped)
2012 return;
2013
2014 if (progress < xfer->ptp_sts_word_post)
2015 return;
2016
2017 ptp_read_system_postts(xfer->ptp_sts);
2018
2019 if (irqs_off) {
2020 local_irq_restore(ctlr->irq_flags);
2021 preempt_enable();
2022 }
2023
2024 /* Capture the resolution of the timestamp */
2025 xfer->ptp_sts_word_post = progress;
2026
2027 xfer->timestamped = 1;
2028}
2029EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
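
/*
 * PIO loop sketch (hypothetical "grault" driver): bracket each word pushed
 * to the FIFO so the word selected by xfer->ptp_sts_word_pre/post gets
 * timestamped.
 *
 *	static void grault_tx_words(struct spi_controller *ctlr,
 *				    struct spi_transfer *xfer,
 *				    void __iomem *fifo)
 *	{
 *		const u8 *tx = xfer->tx_buf;
 *		size_t i;
 *
 *		for (i = 0; i < xfer->len; i++) {
 *			spi_take_timestamp_pre(ctlr, xfer, i, false);
 *			writeb(tx[i], fifo);
 *			spi_take_timestamp_post(ctlr, xfer, i, false);
 *		}
 *	}
 */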
2030
2031/**
2032 * spi_set_thread_rt - set the controller to pump at realtime priority
2033 * @ctlr: controller to boost priority of
2034 *
2035 * This can be called because the controller requested realtime priority
2036 * (by setting the ->rt value before calling spi_register_controller()) or
2037 * because a device on the bus said that its transfers needed realtime
2038 * priority.
2039 *
2040 * NOTE: at the moment if any device on a bus says it needs realtime then
2041 * the thread will be at realtime priority for all transfers on that
2042 * controller. If this eventually becomes a problem we may see if we can
2043 * find a way to boost the priority only temporarily during relevant
2044 * transfers.
2045 */
2046static void spi_set_thread_rt(struct spi_controller *ctlr)
2047{
2048 dev_info(&ctlr->dev,
2049 "will run message pump with realtime priority\n");
2050 sched_set_fifo(ctlr->kworker->task);
2051}
2052
2053static int spi_init_queue(struct spi_controller *ctlr)
2054{
2055 ctlr->running = false;
2056 ctlr->busy = false;
2057 ctlr->queue_empty = true;
2058
2059 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
2060 if (IS_ERR(ctlr->kworker)) {
2061 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2062 return PTR_ERR(ctlr->kworker);
2063 }
2064
2065 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2066
2067 /*
2068 * Controller config will indicate if this controller should run the
2069 * message pump with high (realtime) priority to reduce the transfer
2070 * latency on the bus by minimising the delay between a transfer
2071 * request and the scheduling of the message pump thread. Without this
2072 * setting the message pump thread will remain at default priority.
2073 */
2074 if (ctlr->rt)
2075 spi_set_thread_rt(ctlr);
2076
2077 return 0;
2078}
2079
2080/**
2081 * spi_get_next_queued_message() - called by driver to check for queued
2082 * messages
2083 * @ctlr: the controller to check for queued messages
2084 *
2085 * If there are more messages in the queue, the next message is returned from
2086 * this call.
2087 *
2088 * Return: the next message in the queue, else NULL if the queue is empty.
2089 */
2090struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2091{
2092 struct spi_message *next;
2093 unsigned long flags;
2094
2095 /* Get a pointer to the next message, if any */
2096 spin_lock_irqsave(&ctlr->queue_lock, flags);
2097 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2098 queue);
2099 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2100
2101 return next;
2102}
2103EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
2104
2105/*
2106 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2107 * and spi_maybe_unoptimize_message()
2108 * @msg: the message to unoptimize
2109 *
2110 * Peripheral drivers should use spi_unoptimize_message() and callers inside
2111 * the core should use spi_maybe_unoptimize_message() rather than calling this
2112 * function directly.
2113 *
2114 * It is not valid to call this on a message that is not currently optimized.
2115 */
2116static void __spi_unoptimize_message(struct spi_message *msg)
2117{
2118 struct spi_controller *ctlr = msg->spi->controller;
2119
2120 if (ctlr->unoptimize_message)
2121 ctlr->unoptimize_message(msg);
2122
2123 spi_res_release(ctlr, msg);
2124
2125 msg->optimized = false;
2126 msg->opt_state = NULL;
2127}
2128
2129/*
2130 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2131 * @msg: the message to unoptimize
2132 *
2133 * This function is used to unoptimize a message if and only if it was
2134 * optimized by the core (via spi_maybe_optimize_message()).
2135 */
2136static void spi_maybe_unoptimize_message(struct spi_message *msg)
2137{
2138 if (!msg->pre_optimized && msg->optimized)
2139 __spi_unoptimize_message(msg);
2140}
2141
2142/**
2143 * spi_finalize_current_message() - the current message is complete
2144 * @ctlr: the controller to return the message to
2145 *
2146 * Called by the driver to notify the core that the message in the front of the
2147 * queue is complete and can be removed from the queue.
2148 */
2149void spi_finalize_current_message(struct spi_controller *ctlr)
2150{
2151 struct spi_transfer *xfer;
2152 struct spi_message *mesg;
2153 int ret;
2154
2155 mesg = ctlr->cur_msg;
2156
2157 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2158 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2159 ptp_read_system_postts(xfer->ptp_sts);
2160 xfer->ptp_sts_word_post = xfer->len;
2161 }
2162 }
2163
2164 if (unlikely(ctlr->ptp_sts_supported))
2165 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2166 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2167
2168 spi_unmap_msg(ctlr, mesg);
2169
2170 if (mesg->prepared && ctlr->unprepare_message) {
2171 ret = ctlr->unprepare_message(ctlr, mesg);
2172 if (ret) {
2173 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2174 ret);
2175 }
2176 }
2177
2178 mesg->prepared = false;
2179
2180 spi_maybe_unoptimize_message(mesg);
2181
2182 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2183 smp_mb(); /* See __spi_pump_transfer_message()... */
2184 if (READ_ONCE(ctlr->cur_msg_need_completion))
2185 complete(&ctlr->cur_msg_completion);
2186
2187 trace_spi_message_done(mesg);
2188
2189 mesg->state = NULL;
2190 if (mesg->complete)
2191 mesg->complete(mesg->context);
2192}
2193EXPORT_SYMBOL_GPL(spi_finalize_current_message);
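
/*
 * Usage sketch (assuming a hypothetical "foo" driver that implements
 * transfer_one_message() itself and completes messages from a threaded
 * interrupt handler):
 *
 *	static irqreturn_t foo_irq_thread(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		foo_drain_rx_fifo(spi_controller_get_devdata(ctlr));
 *		ctlr->cur_msg->status = 0;
 *		spi_finalize_current_message(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */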
2194
2195static int spi_start_queue(struct spi_controller *ctlr)
2196{
2197 unsigned long flags;
2198
2199 spin_lock_irqsave(&ctlr->queue_lock, flags);
2200
2201 if (ctlr->running || ctlr->busy) {
2202 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2203 return -EBUSY;
2204 }
2205
2206 ctlr->running = true;
2207 ctlr->cur_msg = NULL;
2208 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2209
2210 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2211
2212 return 0;
2213}
2214
2215static int spi_stop_queue(struct spi_controller *ctlr)
2216{
2217 unsigned long flags;
2218 unsigned limit = 500;
2219 int ret = 0;
2220
2221 spin_lock_irqsave(&ctlr->queue_lock, flags);
2222
2223 /*
2224 * This is a bit lame, but is optimized for the common execution path.
2225 * A wait_queue on the ctlr->busy could be used, but then the common
2226 * execution path (pump_messages) would be required to call wake_up or
2227 * friends on every SPI message. Do this instead.
2228 */
2229 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2230 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2231 usleep_range(10000, 11000);
2232 spin_lock_irqsave(&ctlr->queue_lock, flags);
2233 }
2234
2235 if (!list_empty(&ctlr->queue) || ctlr->busy)
2236 ret = -EBUSY;
2237 else
2238 ctlr->running = false;
2239
2240 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2241
2242 return ret;
2243}
2244
2245static int spi_destroy_queue(struct spi_controller *ctlr)
2246{
2247 int ret;
2248
2249 ret = spi_stop_queue(ctlr);
2250
2251 /*
2252 * kthread_flush_worker will block until all work is done.
2253 * If the reason that stop_queue timed out is that the work will never
2254 * finish, then it does no good to flush or stop the worker thread, so
2255 * just return anyway.
2256 */
2257 if (ret) {
2258 dev_err(&ctlr->dev, "problem destroying queue\n");
2259 return ret;
2260 }
2261
2262 kthread_destroy_worker(ctlr->kworker);
2263
2264 return 0;
2265}
2266
2267static int __spi_queued_transfer(struct spi_device *spi,
2268 struct spi_message *msg,
2269 bool need_pump)
2270{
2271 struct spi_controller *ctlr = spi->controller;
2272 unsigned long flags;
2273
2274 spin_lock_irqsave(&ctlr->queue_lock, flags);
2275
2276 if (!ctlr->running) {
2277 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2278 return -ESHUTDOWN;
2279 }
2280 msg->actual_length = 0;
2281 msg->status = -EINPROGRESS;
2282
2283 list_add_tail(&msg->queue, &ctlr->queue);
2284 ctlr->queue_empty = false;
2285 if (!ctlr->busy && need_pump)
2286 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2287
2288 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2289 return 0;
2290}
2291
2292/**
2293 * spi_queued_transfer - transfer function for queued transfers
2294 * @spi: SPI device which is requesting transfer
2295 * @msg: SPI message which is to be queued to the driver queue
2296 *
2297 * Return: zero on success, else a negative error code.
2298 */
2299static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2300{
2301 return __spi_queued_transfer(spi, msg, true);
2302}
2303
2304static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2305{
2306 int ret;
2307
2308 ctlr->transfer = spi_queued_transfer;
2309 if (!ctlr->transfer_one_message)
2310 ctlr->transfer_one_message = spi_transfer_one_message;
2311
2312 /* Initialize and start queue */
2313 ret = spi_init_queue(ctlr);
2314 if (ret) {
2315 dev_err(&ctlr->dev, "problem initializing queue\n");
2316 goto err_init_queue;
2317 }
2318 ctlr->queued = true;
2319 ret = spi_start_queue(ctlr);
2320 if (ret) {
2321 dev_err(&ctlr->dev, "problem starting queue\n");
2322 goto err_start_queue;
2323 }
2324
2325 return 0;
2326
2327err_start_queue:
2328 spi_destroy_queue(ctlr);
2329err_init_queue:
2330 return ret;
2331}
2332
2333/**
2334 * spi_flush_queue - Send all pending messages in the queue from the callers'
2335 * context
2336 * @ctlr: controller to process queue for
2337 *
2338 * This should be used when one wants to ensure all pending messages have been
2339 * sent before doing something else. It is used by the spi-mem code to make sure SPI
2340 * memory operations do not preempt regular SPI transfers that have been queued
2341 * before the spi-mem operation.
2342 */
2343void spi_flush_queue(struct spi_controller *ctlr)
2344{
2345 if (ctlr->transfer == spi_queued_transfer)
2346 __spi_pump_messages(ctlr, false);
2347}
2348
2349/*-------------------------------------------------------------------------*/
2350
2351#if defined(CONFIG_OF)
2352static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2353 struct spi_delay *delay, const char *prop)
2354{
2355 u32 value;
2356
2357 if (!of_property_read_u32(nc, prop, &value)) {
2358 if (value > U16_MAX) {
2359 delay->value = DIV_ROUND_UP(value, 1000);
2360 delay->unit = SPI_DELAY_UNIT_USECS;
2361 } else {
2362 delay->value = value;
2363 delay->unit = SPI_DELAY_UNIT_NSECS;
2364 }
2365 }
2366}
2367
2368static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2369 struct device_node *nc)
2370{
2371 u32 value, cs[SPI_CS_CNT_MAX];
2372 int rc, idx;
2373
2374 /* Mode (clock phase/polarity/etc.) */
2375 if (of_property_read_bool(nc, "spi-cpha"))
2376 spi->mode |= SPI_CPHA;
2377 if (of_property_read_bool(nc, "spi-cpol"))
2378 spi->mode |= SPI_CPOL;
2379 if (of_property_read_bool(nc, "spi-3wire"))
2380 spi->mode |= SPI_3WIRE;
2381 if (of_property_read_bool(nc, "spi-lsb-first"))
2382 spi->mode |= SPI_LSB_FIRST;
2383 if (of_property_read_bool(nc, "spi-cs-high"))
2384 spi->mode |= SPI_CS_HIGH;
2385
2386 /* Device DUAL/QUAD mode */
2387 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2388 switch (value) {
2389 case 0:
2390 spi->mode |= SPI_NO_TX;
2391 break;
2392 case 1:
2393 break;
2394 case 2:
2395 spi->mode |= SPI_TX_DUAL;
2396 break;
2397 case 4:
2398 spi->mode |= SPI_TX_QUAD;
2399 break;
2400 case 8:
2401 spi->mode |= SPI_TX_OCTAL;
2402 break;
2403 default:
2404 dev_warn(&ctlr->dev,
2405 "spi-tx-bus-width %d not supported\n",
2406 value);
2407 break;
2408 }
2409 }
2410
2411 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2412 switch (value) {
2413 case 0:
2414 spi->mode |= SPI_NO_RX;
2415 break;
2416 case 1:
2417 break;
2418 case 2:
2419 spi->mode |= SPI_RX_DUAL;
2420 break;
2421 case 4:
2422 spi->mode |= SPI_RX_QUAD;
2423 break;
2424 case 8:
2425 spi->mode |= SPI_RX_OCTAL;
2426 break;
2427 default:
2428 dev_warn(&ctlr->dev,
2429 "spi-rx-bus-width %d not supported\n",
2430 value);
2431 break;
2432 }
2433 }
2434
2435 if (spi_controller_is_slave(ctlr)) {
2436 if (!of_node_name_eq(nc, "slave")) {
2437 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2438 nc);
2439 return -EINVAL;
2440 }
2441 return 0;
2442 }
2443
2444 if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
2445		dev_err(&ctlr->dev, "Number of chip selects exceeds the maximum supported\n");
2446 return -EINVAL;
2447 }
2448
2449 spi_set_all_cs_unused(spi);
2450
2451 /* Device address */
2452 rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2453 SPI_CS_CNT_MAX);
2454 if (rc < 0) {
2455 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2456 nc, rc);
2457 return rc;
2458 }
2459 if (rc > ctlr->num_chipselect) {
2460 dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2461 nc, rc);
2462 return rc;
2463 }
2464 if ((of_property_read_bool(nc, "parallel-memories")) &&
2465 (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2466 dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2467 return -EINVAL;
2468 }
2469 for (idx = 0; idx < rc; idx++)
2470 spi_set_chipselect(spi, idx, cs[idx]);
2471
2472 /*
2473 * By default spi->chip_select[0] will hold the physical CS number,
2474 * so set bit 0 in spi->cs_index_mask.
2475 */
2476 spi->cs_index_mask = BIT(0);
2477
2478 /* Device speed */
2479 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2480 spi->max_speed_hz = value;
2481
2482 /* Device CS delays */
2483 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2484 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2485 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2486
2487 return 0;
2488}
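
/*
 * For reference, an illustrative device tree node exercising the
 * properties parsed above (a typical SPI NOR flash; all values are
 * examples only):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <40000000>;
 *		spi-tx-bus-width = <4>;
 *		spi-rx-bus-width = <4>;
 *		spi-cs-setup-delay-ns = <100>;
 *	};
 */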
2489
2490static struct spi_device *
2491of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2492{
2493 struct spi_device *spi;
2494 int rc;
2495
2496 /* Alloc an spi_device */
2497 spi = spi_alloc_device(ctlr);
2498 if (!spi) {
2499 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2500 rc = -ENOMEM;
2501 goto err_out;
2502 }
2503
2504 /* Select device driver */
2505 rc = of_alias_from_compatible(nc, spi->modalias,
2506 sizeof(spi->modalias));
2507 if (rc < 0) {
2508 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2509 goto err_out;
2510 }
2511
2512 rc = of_spi_parse_dt(ctlr, spi, nc);
2513 if (rc)
2514 goto err_out;
2515
2516 /* Store a pointer to the node in the device structure */
2517 of_node_get(nc);
2518
2519 device_set_node(&spi->dev, of_fwnode_handle(nc));
2520
2521 /* Register the new device */
2522 rc = spi_add_device(spi);
2523 if (rc) {
2524 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2525 goto err_of_node_put;
2526 }
2527
2528 return spi;
2529
2530err_of_node_put:
2531 of_node_put(nc);
2532err_out:
2533 spi_dev_put(spi);
2534 return ERR_PTR(rc);
2535}
2536
2537/**
2538 * of_register_spi_devices() - Register child devices onto the SPI bus
2539 * @ctlr: Pointer to spi_controller device
2540 *
2541 * Registers an spi_device for each child node of the controller node that
2542 * represents a valid SPI slave.
2543 */
2544static void of_register_spi_devices(struct spi_controller *ctlr)
2545{
2546 struct spi_device *spi;
2547 struct device_node *nc;
2548
2549 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2550 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2551 continue;
2552 spi = of_register_spi_device(ctlr, nc);
2553 if (IS_ERR(spi)) {
2554 dev_warn(&ctlr->dev,
2555 "Failed to create SPI device for %pOF\n", nc);
2556 of_node_clear_flag(nc, OF_POPULATED);
2557 }
2558 }
2559}
2560#else
2561static void of_register_spi_devices(struct spi_controller *ctlr) { }
2562#endif
2563
2564/**
2565 * spi_new_ancillary_device() - Register ancillary SPI device
2566 * @spi: Pointer to the main SPI device registering the ancillary device
2567 * @chip_select: Chip Select of the ancillary device
2568 *
2569 * Register an ancillary SPI device; for example, some chips have a chip-select
2570 * for normal device usage and another one for setup/firmware upload.
2571 *
2572 * This may only be called from the main SPI device's probe routine.
2573 *
2574 * Return: 0 on success; negative errno on failure
2575 */
2576struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2577 u8 chip_select)
2578{
2579 struct spi_controller *ctlr = spi->controller;
2580 struct spi_device *ancillary;
2581 int rc = 0;
2582
2583 /* Alloc an spi_device */
2584 ancillary = spi_alloc_device(ctlr);
2585 if (!ancillary) {
2586 rc = -ENOMEM;
2587 goto err_out;
2588 }
2589
2590 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2591
2592 /* Use provided chip-select for ancillary device */
2593 spi_set_all_cs_unused(ancillary);
2594 spi_set_chipselect(ancillary, 0, chip_select);
2595
2596 /* Take over SPI mode/speed from SPI main device */
2597 ancillary->max_speed_hz = spi->max_speed_hz;
2598 ancillary->mode = spi->mode;
2599 /*
2600 * By default spi->chip_select[0] will hold the physical CS number,
2601 * so set bit 0 in spi->cs_index_mask.
2602 */
2603 ancillary->cs_index_mask = BIT(0);
2604
2605 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2606
2607 /* Register the new device */
2608 rc = __spi_add_device(ancillary);
2609 if (rc) {
2610 dev_err(&spi->dev, "failed to register ancillary device\n");
2611 goto err_out;
2612 }
2613
2614 return ancillary;
2615
2616err_out:
2617 spi_dev_put(ancillary);
2618 return ERR_PTR(rc);
2619}
2620EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
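
/*
 * Usage sketch (hypothetical peripheral driver, called from its probe()):
 * a chip with a second chip-select dedicated to firmware upload could do:
 *
 *	priv->fw_spi = spi_new_ancillary_device(spi, 1);
 *	if (IS_ERR(priv->fw_spi))
 *		return PTR_ERR(priv->fw_spi);
 *
 * The ancillary device inherits mode and max_speed_hz from @spi and is
 * torn down together with the other children of the controller.
 */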
2621
2622#ifdef CONFIG_ACPI
2623struct acpi_spi_lookup {
2624 struct spi_controller *ctlr;
2625 u32 max_speed_hz;
2626 u32 mode;
2627 int irq;
2628 u8 bits_per_word;
2629 u8 chip_select;
2630 int n;
2631 int index;
2632};
2633
2634static int acpi_spi_count(struct acpi_resource *ares, void *data)
2635{
2636 struct acpi_resource_spi_serialbus *sb;
2637 int *count = data;
2638
2639 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2640 return 1;
2641
2642 sb = &ares->data.spi_serial_bus;
2643 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2644 return 1;
2645
2646 *count = *count + 1;
2647
2648 return 1;
2649}
2650
2651/**
2652 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2653 * @adev: ACPI device
2654 *
2655 * Return: the number of SpiSerialBus resources in the ACPI device's
2656 * resource list, or a negative error code.
2657 */
2658int acpi_spi_count_resources(struct acpi_device *adev)
2659{
2660 LIST_HEAD(r);
2661 int count = 0;
2662 int ret;
2663
2664 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2665 if (ret < 0)
2666 return ret;
2667
2668 acpi_dev_free_resource_list(&r);
2669
2670 return count;
2671}
2672EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2673
2674static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2675 struct acpi_spi_lookup *lookup)
2676{
2677 const union acpi_object *obj;
2678
2679 if (!x86_apple_machine)
2680 return;
2681
2682 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2683 && obj->buffer.length >= 4)
2684 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2685
2686 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2687 && obj->buffer.length == 8)
2688 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2689
2690 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2691 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2692 lookup->mode |= SPI_LSB_FIRST;
2693
2694 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2695 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2696 lookup->mode |= SPI_CPOL;
2697
2698 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2699 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2700 lookup->mode |= SPI_CPHA;
2701}
2702
2703static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2704{
2705 struct acpi_spi_lookup *lookup = data;
2706 struct spi_controller *ctlr = lookup->ctlr;
2707
2708 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2709 struct acpi_resource_spi_serialbus *sb;
2710 acpi_handle parent_handle;
2711 acpi_status status;
2712
2713 sb = &ares->data.spi_serial_bus;
2714 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2715
2716 if (lookup->index != -1 && lookup->n++ != lookup->index)
2717 return 1;
2718
2719 status = acpi_get_handle(NULL,
2720 sb->resource_source.string_ptr,
2721 &parent_handle);
2722
2723 if (ACPI_FAILURE(status))
2724 return -ENODEV;
2725
2726 if (ctlr) {
2727 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2728 return -ENODEV;
2729 } else {
2730 struct acpi_device *adev;
2731
2732 adev = acpi_fetch_acpi_dev(parent_handle);
2733 if (!adev)
2734 return -ENODEV;
2735
2736 ctlr = acpi_spi_find_controller_by_adev(adev);
2737 if (!ctlr)
2738 return -EPROBE_DEFER;
2739
2740 lookup->ctlr = ctlr;
2741 }
2742
2743 /*
2744 * ACPI DeviceSelection numbering is handled by the
2745 * host controller driver in Windows and can vary
2746 * from driver to driver. In Linux we always expect
2747 * 0 .. max - 1 so we need to ask the driver to
2748 * translate between the two schemes.
2749 */
2750 if (ctlr->fw_translate_cs) {
2751 int cs = ctlr->fw_translate_cs(ctlr,
2752 sb->device_selection);
2753 if (cs < 0)
2754 return cs;
2755 lookup->chip_select = cs;
2756 } else {
2757 lookup->chip_select = sb->device_selection;
2758 }
2759
2760 lookup->max_speed_hz = sb->connection_speed;
2761 lookup->bits_per_word = sb->data_bit_length;
2762
2763 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2764 lookup->mode |= SPI_CPHA;
2765 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2766 lookup->mode |= SPI_CPOL;
2767 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2768 lookup->mode |= SPI_CS_HIGH;
2769 }
2770 } else if (lookup->irq < 0) {
2771 struct resource r;
2772
2773 if (acpi_dev_resource_interrupt(ares, 0, &r))
2774 lookup->irq = r.start;
2775 }
2776
2777 /* Always tell the ACPI core to skip this resource */
2778 return 1;
2779}
2780
2781/**
2782 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2783 * @ctlr: controller to which the spi device belongs
2784 * @adev: ACPI Device for the spi device
2785 * @index: Index of the spi resource inside the ACPI Node
2786 *
2787 * This should be used to allocate a new SPI device from an ACPI Device node.
2788 * The caller is responsible for calling spi_add_device to register the SPI device.
2789 *
2790 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2791 * using the resource.
2792 * If index is set to -1, index is not used.
2793 * Note: If index is -1, ctlr must be set.
2794 *
2795 * Return: a pointer to the new device, or ERR_PTR on error.
2796 */
2797struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2798 struct acpi_device *adev,
2799 int index)
2800{
2801 acpi_handle parent_handle = NULL;
2802 struct list_head resource_list;
2803 struct acpi_spi_lookup lookup = {};
2804 struct spi_device *spi;
2805 int ret;
2806
2807 if (!ctlr && index == -1)
2808 return ERR_PTR(-EINVAL);
2809
2810 lookup.ctlr = ctlr;
2811 lookup.irq = -1;
2812 lookup.index = index;
2813 lookup.n = 0;
2814
2815 INIT_LIST_HEAD(&resource_list);
2816 ret = acpi_dev_get_resources(adev, &resource_list,
2817 acpi_spi_add_resource, &lookup);
2818 acpi_dev_free_resource_list(&resource_list);
2819
2820 if (ret < 0)
2821 /* Found SPI in _CRS but it points to another controller */
2822 return ERR_PTR(ret);
2823
2824 if (!lookup.max_speed_hz &&
2825 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2826 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2827 /* Apple does not use _CRS but nested devices for SPI slaves */
2828 acpi_spi_parse_apple_properties(adev, &lookup);
2829 }
2830
2831 if (!lookup.max_speed_hz)
2832 return ERR_PTR(-ENODEV);
2833
2834 spi = spi_alloc_device(lookup.ctlr);
2835 if (!spi) {
2836 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2837 dev_name(&adev->dev));
2838 return ERR_PTR(-ENOMEM);
2839 }
2840
2841 spi_set_all_cs_unused(spi);
2842 spi_set_chipselect(spi, 0, lookup.chip_select);
2843
2844 ACPI_COMPANION_SET(&spi->dev, adev);
2845 spi->max_speed_hz = lookup.max_speed_hz;
2846 spi->mode |= lookup.mode;
2847 spi->irq = lookup.irq;
2848 spi->bits_per_word = lookup.bits_per_word;
2849 /*
2850 * By default spi->chip_select[0] will hold the physical CS number,
2851 * so set bit 0 in spi->cs_index_mask.
2852 */
2853 spi->cs_index_mask = BIT(0);
2854
2855 return spi;
2856}
2857EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
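
/*
 * Usage sketch (hypothetical caller; @adev is the peripheral's ACPI device
 * and index 0 selects the first SpiSerialBus resource in its _CRS):
 *
 *	struct spi_device *spi;
 *	int ret;
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 0);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 *	return ret;
 */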
2858
2859static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2860 struct acpi_device *adev)
2861{
2862 struct spi_device *spi;
2863
2864 if (acpi_bus_get_status(adev) || !adev->status.present ||
2865 acpi_device_enumerated(adev))
2866 return AE_OK;
2867
2868 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2869 if (IS_ERR(spi)) {
2870 if (PTR_ERR(spi) == -ENOMEM)
2871 return AE_NO_MEMORY;
2872 else
2873 return AE_OK;
2874 }
2875
2876 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2877 sizeof(spi->modalias));
2878
2879 if (spi->irq < 0)
2880 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2881
2882 acpi_device_set_enumerated(adev);
2883
2884 adev->power.flags.ignore_parent = true;
2885 if (spi_add_device(spi)) {
2886 adev->power.flags.ignore_parent = false;
2887 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2888 dev_name(&adev->dev));
2889 spi_dev_put(spi);
2890 }
2891
2892 return AE_OK;
2893}
2894
2895static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2896 void *data, void **return_value)
2897{
2898 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2899 struct spi_controller *ctlr = data;
2900
2901 if (!adev)
2902 return AE_OK;
2903
2904 return acpi_register_spi_device(ctlr, adev);
2905}
2906
2907#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2908
2909static void acpi_register_spi_devices(struct spi_controller *ctlr)
2910{
2911 acpi_status status;
2912 acpi_handle handle;
2913
2914 handle = ACPI_HANDLE(ctlr->dev.parent);
2915 if (!handle)
2916 return;
2917
2918 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2919 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2920 acpi_spi_add_device, NULL, ctlr, NULL);
2921 if (ACPI_FAILURE(status))
2922 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2923}
2924#else
2925static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2926#endif /* CONFIG_ACPI */
2927
2928static void spi_controller_release(struct device *dev)
2929{
2930 struct spi_controller *ctlr;
2931
2932 ctlr = container_of(dev, struct spi_controller, dev);
2933 kfree(ctlr);
2934}
2935
2936static struct class spi_master_class = {
2937 .name = "spi_master",
2938 .dev_release = spi_controller_release,
2939 .dev_groups = spi_master_groups,
2940};
2941
2942#ifdef CONFIG_SPI_SLAVE
2943/**
2944 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2945 * controller
2946 * @spi: device used for the current transfer
2947 */
2948int spi_slave_abort(struct spi_device *spi)
2949{
2950 struct spi_controller *ctlr = spi->controller;
2951
2952 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2953 return ctlr->slave_abort(ctlr);
2954
2955 return -ENOTSUPP;
2956}
2957EXPORT_SYMBOL_GPL(spi_slave_abort);
2958
2959int spi_target_abort(struct spi_device *spi)
2960{
2961 struct spi_controller *ctlr = spi->controller;
2962
2963 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2964 return ctlr->target_abort(ctlr);
2965
2966 return -ENOTSUPP;
2967}
2968EXPORT_SYMBOL_GPL(spi_target_abort);
2969
2970static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2971 char *buf)
2972{
2973 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2974 dev);
2975 struct device *child;
2976
2977 child = device_find_any_child(&ctlr->dev);
2978 return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2979}
2980
2981static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2982 const char *buf, size_t count)
2983{
2984 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2985 dev);
2986 struct spi_device *spi;
2987 struct device *child;
2988 char name[32];
2989 int rc;
2990
2991 rc = sscanf(buf, "%31s", name);
2992 if (rc != 1 || !name[0])
2993 return -EINVAL;
2994
2995 child = device_find_any_child(&ctlr->dev);
2996 if (child) {
2997 /* Remove registered slave */
2998 device_unregister(child);
2999 put_device(child);
3000 }
3001
3002 if (strcmp(name, "(null)")) {
3003 /* Register new slave */
3004 spi = spi_alloc_device(ctlr);
3005 if (!spi)
3006 return -ENOMEM;
3007
3008 strscpy(spi->modalias, name, sizeof(spi->modalias));
3009
3010 rc = spi_add_device(spi);
3011 if (rc) {
3012 spi_dev_put(spi);
3013 return rc;
3014 }
3015 }
3016
3017 return count;
3018}
3019
3020static DEVICE_ATTR_RW(slave);
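
/*
 * From userspace, a protocol handler is bound to a slave controller by
 * writing its name to this attribute (illustrative shell session):
 *
 *	# echo spidev > /sys/class/spi_slave/spi0/slave
 *	# echo "(null)" > /sys/class/spi_slave/spi0/slave
 *
 * The first write registers a "spidev" slave device; writing "(null)"
 * removes the currently registered device without adding a replacement.
 */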
3021
3022static struct attribute *spi_slave_attrs[] = {
3023 &dev_attr_slave.attr,
3024 NULL,
3025};
3026
3027static const struct attribute_group spi_slave_group = {
3028 .attrs = spi_slave_attrs,
3029};
3030
3031static const struct attribute_group *spi_slave_groups[] = {
3032 &spi_controller_statistics_group,
3033 &spi_slave_group,
3034 NULL,
3035};
3036
3037static struct class spi_slave_class = {
3038 .name = "spi_slave",
3039 .dev_release = spi_controller_release,
3040 .dev_groups = spi_slave_groups,
3041};
3042#else
3043extern struct class spi_slave_class; /* dummy */
3044#endif
3045
3046/**
3047 * __spi_alloc_controller - allocate an SPI master or slave controller
3048 * @dev: the controller, possibly using the platform_bus
3049 * @size: how much zeroed driver-private data to allocate; the pointer to this
3050 * memory is in the driver_data field of the returned device, accessible
3051 * with spi_controller_get_devdata(); the memory is cacheline aligned;
3052 * drivers granting DMA access to portions of their private data need to
3053 * round up @size using ALIGN(size, dma_get_cache_alignment()).
3054 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
3055 * slave (true) controller
3056 * Context: can sleep
3057 *
3058 * This call is used only by SPI controller drivers, which are the
3059 * only ones directly touching chip registers. It's how they allocate
3060 * an spi_controller structure, prior to calling spi_register_controller().
3061 *
3062 * This must be called from context that can sleep.
3063 *
3064 * The caller is responsible for assigning the bus number and initializing the
3065 * controller's methods before calling spi_register_controller(); and (after
3066 * errors adding the device) calling spi_controller_put() to prevent a memory
3067 * leak.
3068 *
3069 * Return: the SPI controller structure on success, else NULL.
3070 */
3071struct spi_controller *__spi_alloc_controller(struct device *dev,
3072 unsigned int size, bool slave)
3073{
3074 struct spi_controller *ctlr;
3075 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3076
3077 if (!dev)
3078 return NULL;
3079
3080 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3081 if (!ctlr)
3082 return NULL;
3083
3084 device_initialize(&ctlr->dev);
3085 INIT_LIST_HEAD(&ctlr->queue);
3086 spin_lock_init(&ctlr->queue_lock);
3087 spin_lock_init(&ctlr->bus_lock_spinlock);
3088 mutex_init(&ctlr->bus_lock_mutex);
3089 mutex_init(&ctlr->io_mutex);
3090 mutex_init(&ctlr->add_lock);
3091 ctlr->bus_num = -1;
3092 ctlr->num_chipselect = 1;
3093 ctlr->slave = slave;
3094 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
3095 ctlr->dev.class = &spi_slave_class;
3096 else
3097 ctlr->dev.class = &spi_master_class;
3098 ctlr->dev.parent = dev;
3099 pm_suspend_ignore_children(&ctlr->dev, true);
3100 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3101
3102 return ctlr;
3103}
3104EXPORT_SYMBOL_GPL(__spi_alloc_controller);
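
/*
 * Typical call sequence in a controller driver's probe() (sketch only;
 * the "foo" names are hypothetical):
 *
 *	struct spi_controller *ctlr;
 *	struct foo_priv *priv;
 *	int ret;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 *	(initialize ctlr->transfer_one, ctlr->num_chipselect, ...)
 *	ret = spi_register_controller(ctlr);
 *	if (ret)
 *		spi_controller_put(ctlr);
 *	return ret;
 */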
3105
3106static void devm_spi_release_controller(struct device *dev, void *ctlr)
3107{
3108 spi_controller_put(*(struct spi_controller **)ctlr);
3109}
3110
3111/**
3112 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3113 * @dev: physical device of SPI controller
3114 * @size: how much zeroed driver-private data to allocate
3115 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
3116 * Context: can sleep
3117 *
3118 * Allocate an SPI controller and automatically release a reference on it
3119 * when @dev is unbound from its driver. Drivers are thus relieved from
3120 * having to call spi_controller_put().
3121 *
3122 * The arguments to this function are identical to __spi_alloc_controller().
3123 *
3124 * Return: the SPI controller structure on success, else NULL.
3125 */
3126struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3127 unsigned int size,
3128 bool slave)
3129{
3130 struct spi_controller **ptr, *ctlr;
3131
3132 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3133 GFP_KERNEL);
3134 if (!ptr)
3135 return NULL;
3136
3137 ctlr = __spi_alloc_controller(dev, size, slave);
3138 if (ctlr) {
3139 ctlr->devm_allocated = true;
3140 *ptr = ctlr;
3141 devres_add(dev, ptr);
3142 } else {
3143 devres_free(ptr);
3144 }
3145
3146 return ctlr;
3147}
3148EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
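
/*
 * The devm_ variants shrink the sketch above (same hypothetical driver):
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *
 * and no spi_controller_put() is needed on any later error path.
 */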
3149
3150/**
3151 * spi_get_gpio_descs() - grab chip select GPIOs for the master
3152 * @ctlr: The SPI master to grab GPIO descriptors for
3153 */
3154static int spi_get_gpio_descs(struct spi_controller *ctlr)
3155{
3156 int nb, i;
3157 struct gpio_desc **cs;
3158 struct device *dev = &ctlr->dev;
3159 unsigned long native_cs_mask = 0;
3160 unsigned int num_cs_gpios = 0;
3161
3162 nb = gpiod_count(dev, "cs");
3163 if (nb < 0) {
3164 /* No GPIOs at all is fine, else return the error */
3165 if (nb == -ENOENT)
3166 return 0;
3167 return nb;
3168 }
3169
3170 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3171
3172 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3173 GFP_KERNEL);
3174 if (!cs)
3175 return -ENOMEM;
3176 ctlr->cs_gpiods = cs;
3177
3178 for (i = 0; i < nb; i++) {
3179 /*
3180		 * Most chipselects are active low; the inverted
3181		 * semantics are handled by special quirks in gpiolib, so
3182		 * initializing them GPIOD_OUT_LOW here means
3183		 * "unasserted". In most cases this will drive the physical
3184		 * line high.
3185 */
3186 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3187 GPIOD_OUT_LOW);
3188 if (IS_ERR(cs[i]))
3189 return PTR_ERR(cs[i]);
3190
3191 if (cs[i]) {
3192 /*
3193 * If we find a CS GPIO, name it after the device and
3194 * chip select line.
3195 */
3196 char *gpioname;
3197
3198 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3199 dev_name(dev), i);
3200 if (!gpioname)
3201 return -ENOMEM;
3202 gpiod_set_consumer_name(cs[i], gpioname);
3203 num_cs_gpios++;
3204 continue;
3205 }
3206
3207 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3208 dev_err(dev, "Invalid native chip select %d\n", i);
3209 return -EINVAL;
3210 }
3211 native_cs_mask |= BIT(i);
3212 }
3213
3214 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3215
3216 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3217 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3218 dev_err(dev, "No unused native chip select available\n");
3219 return -EINVAL;
3220 }
3221
3222 return 0;
3223}
3224
3225static int spi_controller_check_ops(struct spi_controller *ctlr)
3226{
3227 /*
3228	 * The controller may implement only the high-level SPI-memory-like
3229	 * operations if it does not support regular SPI transfers, and this is a
3230	 * valid use case.
3231	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3232	 * one of the ->transfer_xxx() methods be implemented.
3233 */
3234 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3235 if (!ctlr->transfer && !ctlr->transfer_one &&
3236 !ctlr->transfer_one_message) {
3237 return -EINVAL;
3238 }
3239 }
3240
3241 return 0;
3242}
3243
3244/* Allocate dynamic bus number using Linux idr */
3245static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3246{
3247 int id;
3248
3249 mutex_lock(&board_lock);
3250 id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3251 mutex_unlock(&board_lock);
3252 if (WARN(id < 0, "couldn't get idr"))
3253 return id == -ENOSPC ? -EBUSY : id;
3254 ctlr->bus_num = id;
3255 return 0;
3256}
3257
3258/**
3259 * spi_register_controller - register SPI master or slave controller
3260 * @ctlr: initialized master, originally from spi_alloc_master() or
3261 * spi_alloc_slave()
3262 * Context: can sleep
3263 *
3264 * SPI controllers connect to their drivers using some non-SPI bus,
3265 * such as the platform bus. The final stage of probe() in that code
3266 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3267 *
3268 * SPI controllers use board-specific (often SoC-specific) bus numbers,
3269 * and board-specific addressing for SPI devices combines those numbers
3270 * with chip select numbers. Since SPI does not directly support dynamic
3271 * device identification, boards need configuration tables telling which
3272 * chip is at which address.
3273 *
3274 * This must be called from context that can sleep. It returns zero on
3275 * success, else a negative error code (dropping the controller's refcount).
3276 * After a successful return, the caller is responsible for calling
3277 * spi_unregister_controller().
3278 *
3279 * Return: zero on success, else a negative error code.
3280 */
3281int spi_register_controller(struct spi_controller *ctlr)
3282{
3283 struct device *dev = ctlr->dev.parent;
3284 struct boardinfo *bi;
3285 int first_dynamic;
3286 int status;
3287 int idx;
3288
3289 if (!dev)
3290 return -ENODEV;
3291
3292 /*
3293 * Make sure all necessary hooks are implemented before registering
3294 * the SPI controller.
3295 */
3296 status = spi_controller_check_ops(ctlr);
3297 if (status)
3298 return status;
3299
3300 if (ctlr->bus_num < 0)
3301 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3302 if (ctlr->bus_num >= 0) {
3303		/* Devices with a fixed bus num must claim exactly that number */
3304 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3305 if (status)
3306 return status;
3307 }
3308 if (ctlr->bus_num < 0) {
3309 first_dynamic = of_alias_get_highest_id("spi");
3310 if (first_dynamic < 0)
3311 first_dynamic = 0;
3312 else
3313 first_dynamic++;
3314
3315 status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3316 if (status)
3317 return status;
3318 }
3319 ctlr->bus_lock_flag = 0;
3320 init_completion(&ctlr->xfer_completion);
3321 init_completion(&ctlr->cur_msg_completion);
3322 if (!ctlr->max_dma_len)
3323 ctlr->max_dma_len = INT_MAX;
3324
3325 /*
3326 * Register the device, then userspace will see it.
3327 * Registration fails if the bus ID is in use.
3328 */
3329 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3330
3331 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3332 status = spi_get_gpio_descs(ctlr);
3333 if (status)
3334 goto free_bus_id;
3335 /*
3336 * A controller using GPIO descriptors always
3337 * supports SPI_CS_HIGH if need be.
3338 */
3339 ctlr->mode_bits |= SPI_CS_HIGH;
3340 }
3341
3342 /*
3343 * Even if it's just one always-selected device, there must
3344 * be at least one chipselect.
3345 */
3346 if (!ctlr->num_chipselect) {
3347 status = -EINVAL;
3348 goto free_bus_id;
3349 }
3350
3351 /* Setting last_cs to SPI_INVALID_CS means no chip selected */
3352 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3353 ctlr->last_cs[idx] = SPI_INVALID_CS;
3354
3355 status = device_add(&ctlr->dev);
3356 if (status < 0)
3357 goto free_bus_id;
3358 dev_dbg(dev, "registered %s %s\n",
3359 spi_controller_is_slave(ctlr) ? "slave" : "master",
3360 dev_name(&ctlr->dev));
3361
3362 /*
3363 * If we're using a queued driver, start the queue. Note that we don't
3364 * need the queueing logic if the driver is only supporting high-level
3365 * memory operations.
3366 */
3367 if (ctlr->transfer) {
3368 dev_info(dev, "controller is unqueued, this is deprecated\n");
3369 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3370 status = spi_controller_initialize_queue(ctlr);
3371 if (status) {
3372 device_del(&ctlr->dev);
3373 goto free_bus_id;
3374 }
3375 }
3376 /* Add statistics */
3377 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3378 if (!ctlr->pcpu_statistics) {
3379 dev_err(dev, "Error allocating per-cpu statistics\n");
3380 status = -ENOMEM;
3381 goto destroy_queue;
3382 }
3383
3384 mutex_lock(&board_lock);
3385 list_add_tail(&ctlr->list, &spi_controller_list);
3386 list_for_each_entry(bi, &board_list, list)
3387 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3388 mutex_unlock(&board_lock);
3389
3390 /* Register devices from the device tree and ACPI */
3391 of_register_spi_devices(ctlr);
3392 acpi_register_spi_devices(ctlr);
3393 return status;
3394
3395destroy_queue:
3396 spi_destroy_queue(ctlr);
3397free_bus_id:
3398 mutex_lock(&board_lock);
3399 idr_remove(&spi_master_idr, ctlr->bus_num);
3400 mutex_unlock(&board_lock);
3401 return status;
3402}
3403EXPORT_SYMBOL_GPL(spi_register_controller);
3404
3405static void devm_spi_unregister(struct device *dev, void *res)
3406{
3407 spi_unregister_controller(*(struct spi_controller **)res);
3408}
3409
3410/**
3411 * devm_spi_register_controller - register managed SPI master or slave
3412 * controller
3413 * @dev: device managing SPI controller
3414 * @ctlr: initialized controller, originally from spi_alloc_master() or
3415 * spi_alloc_slave()
3416 * Context: can sleep
3417 *
3418 * Register an SPI controller as with spi_register_controller(); it will
3419 * automatically be unregistered and freed when @dev is unbound.
3420 *
3421 * Return: zero on success, else a negative error code.
3422 */
3423int devm_spi_register_controller(struct device *dev,
3424 struct spi_controller *ctlr)
3425{
3426 struct spi_controller **ptr;
3427 int ret;
3428
3429 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3430 if (!ptr)
3431 return -ENOMEM;
3432
3433 ret = spi_register_controller(ctlr);
3434 if (!ret) {
3435 *ptr = ctlr;
3436 devres_add(dev, ptr);
3437 } else {
3438 devres_free(ptr);
3439 }
3440
3441 return ret;
3442}
3443EXPORT_SYMBOL_GPL(devm_spi_register_controller);
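
/*
 * Putting the pieces together, a fully managed probe() can be as small as
 * this sketch (same hypothetical "foo" driver assumptions as above):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev,
 *					     sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->transfer_one = foo_transfer_one;
 *		ctlr->num_chipselect = 4;
 *		platform_set_drvdata(pdev, ctlr);
 *
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */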
3444
3445static int __unregister(struct device *dev, void *null)
3446{
3447 spi_unregister_device(to_spi_device(dev));
3448 return 0;
3449}
3450
3451/**
3452 * spi_unregister_controller - unregister SPI master or slave controller
3453 * @ctlr: the controller being unregistered
3454 * Context: can sleep
3455 *
3456 * This call is used only by SPI controller drivers, which are the
3457 * only ones directly touching chip registers.
3458 *
3459 * This must be called from context that can sleep.
3460 *
3461 * Note that this function also drops a reference to the controller.
3462 */
3463void spi_unregister_controller(struct spi_controller *ctlr)
3464{
3465 struct spi_controller *found;
3466 int id = ctlr->bus_num;
3467
3468 /* Prevent addition of new devices, unregister existing ones */
3469 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3470 mutex_lock(&ctlr->add_lock);
3471
3472 device_for_each_child(&ctlr->dev, NULL, __unregister);
3473
3474 /* First make sure that this controller was ever added */
3475 mutex_lock(&board_lock);
3476 found = idr_find(&spi_master_idr, id);
3477 mutex_unlock(&board_lock);
3478 if (ctlr->queued) {
3479 if (spi_destroy_queue(ctlr))
3480 dev_err(&ctlr->dev, "queue remove failed\n");
3481 }
3482 mutex_lock(&board_lock);
3483 list_del(&ctlr->list);
3484 mutex_unlock(&board_lock);
3485
3486 device_del(&ctlr->dev);
3487
3488 /* Free bus id */
3489 mutex_lock(&board_lock);
3490 if (found == ctlr)
3491 idr_remove(&spi_master_idr, id);
3492 mutex_unlock(&board_lock);
3493
3494 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3495 mutex_unlock(&ctlr->add_lock);
3496
3497 /*
3498 * Release the last reference on the controller if its driver
3499 * has not yet been converted to devm_spi_alloc_master/slave().
3500 */
3501 if (!ctlr->devm_allocated)
3502 put_device(&ctlr->dev);
3503}
3504EXPORT_SYMBOL_GPL(spi_unregister_controller);
3505
3506static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3507{
3508 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3509}
3510
3511static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3512{
3513 mutex_lock(&ctlr->bus_lock_mutex);
3514 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3515 mutex_unlock(&ctlr->bus_lock_mutex);
3516}
3517
3518static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3519{
3520 mutex_lock(&ctlr->bus_lock_mutex);
3521 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3522 mutex_unlock(&ctlr->bus_lock_mutex);
3523}
3524
3525int spi_controller_suspend(struct spi_controller *ctlr)
3526{
3527 int ret = 0;
3528
3529 /* Basically no-ops for non-queued controllers */
3530 if (ctlr->queued) {
3531 ret = spi_stop_queue(ctlr);
3532 if (ret)
3533 dev_err(&ctlr->dev, "queue stop failed\n");
3534 }
3535
3536 __spi_mark_suspended(ctlr);
3537 return ret;
3538}
3539EXPORT_SYMBOL_GPL(spi_controller_suspend);
3540
3541int spi_controller_resume(struct spi_controller *ctlr)
3542{
3543 int ret = 0;
3544
3545 __spi_mark_resumed(ctlr);
3546
3547 if (ctlr->queued) {
3548 ret = spi_start_queue(ctlr);
3549 if (ret)
3550 dev_err(&ctlr->dev, "queue restart failed\n");
3551 }
3552 return ret;
3553}
3554EXPORT_SYMBOL_GPL(spi_controller_resume);
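
/*
 * Controller drivers typically wire these into their dev_pm_ops (sketch;
 * hypothetical names, clock/pinctrl handling elided; assumes the driver
 * stored the controller with platform_set_drvdata()):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */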
3555
3556/*-------------------------------------------------------------------------*/
3557
3558/* Core methods for spi_message alterations */
3559
3560static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3561 struct spi_message *msg,
3562 void *res)
3563{
3564 struct spi_replaced_transfers *rxfer = res;
3565 size_t i;
3566
3567 /* Call extra callback if requested */
3568 if (rxfer->release)
3569 rxfer->release(ctlr, msg, res);
3570
3571 /* Insert replaced transfers back into the message */
3572 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3573
3574 /* Remove the formerly inserted entries */
3575 for (i = 0; i < rxfer->inserted; i++)
3576 list_del(&rxfer->inserted_transfers[i].transfer_list);
3577}
3578
3579/**
3580 * spi_replace_transfers - replace transfers with several transfers
3581 * and register change with spi_message.resources
3582 * @msg: the spi_message we work upon
3583 * @xfer_first: the first spi_transfer we want to replace
3584 * @remove: number of transfers to remove
3585 * @insert: the number of transfers we want to insert instead
3586 * @release: extra release code necessary in some circumstances
3587 * @extradatasize: extra data to allocate (with alignment guarantees
3588 * of struct @spi_transfer)
3589 * @gfp: gfp flags
3590 *
3591 * Returns: pointer to @spi_replaced_transfers,
3592 * or PTR_ERR(...) in case of errors.
3593 */
3594static struct spi_replaced_transfers *spi_replace_transfers(
3595 struct spi_message *msg,
3596 struct spi_transfer *xfer_first,
3597 size_t remove,
3598 size_t insert,
3599 spi_replaced_release_t release,
3600 size_t extradatasize,
3601 gfp_t gfp)
3602{
3603 struct spi_replaced_transfers *rxfer;
3604 struct spi_transfer *xfer;
3605 size_t i;
3606
3607 /* Allocate the structure using spi_res */
3608 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3609 struct_size(rxfer, inserted_transfers, insert)
3610 + extradatasize,
3611 gfp);
3612 if (!rxfer)
3613 return ERR_PTR(-ENOMEM);
3614
3615 /* The release code to invoke before running the generic release */
3616 rxfer->release = release;
3617
3618 /* Assign extradata */
3619 if (extradatasize)
3620 rxfer->extradata =
3621 &rxfer->inserted_transfers[insert];
3622
3623 /* Init the replaced_transfers list */
3624 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3625
3626 /*
3627 * Assign the list_entry after which we should reinsert
3628 * the @replaced_transfers - it may be spi_message.messages!
3629 */
3630 rxfer->replaced_after = xfer_first->transfer_list.prev;
3631
3632 /* Remove the requested number of transfers */
3633 for (i = 0; i < remove; i++) {
3634 /*
3635		 * If the entry after replaced_after is msg->transfers,
3636 * then we have been requested to remove more transfers
3637 * than are in the list.
3638 */
3639 if (rxfer->replaced_after->next == &msg->transfers) {
3640 dev_err(&msg->spi->dev,
3641 "requested to remove more spi_transfers than are available\n");
3642 /* Insert replaced transfers back into the message */
3643 list_splice(&rxfer->replaced_transfers,
3644 rxfer->replaced_after);
3645
3646 /* Free the spi_replace_transfer structure... */
3647 spi_res_free(rxfer);
3648
3649 /* ...and return with an error */
3650 return ERR_PTR(-EINVAL);
3651 }
3652
3653 /*
3654 * Remove the entry after replaced_after from list of
3655 * transfers and add it to list of replaced_transfers.
3656 */
3657 list_move_tail(rxfer->replaced_after->next,
3658 &rxfer->replaced_transfers);
3659 }
3660
3661 /*
3662	 * Create copies of the first transfer being removed, all
3663	 * with identical settings.
3664 */
3665 for (i = 0; i < insert; i++) {
3666 /* We need to run in reverse order */
3667 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3668
3669 /* Copy all spi_transfer data */
3670 memcpy(xfer, xfer_first, sizeof(*xfer));
3671
3672 /* Add to list */
3673 list_add(&xfer->transfer_list, rxfer->replaced_after);
3674
3675 /* Clear cs_change and delay for all but the last */
3676 if (i) {
3677 xfer->cs_change = false;
3678 xfer->delay.value = 0;
3679 }
3680 }
3681
3682 /* Set up inserted... */
3683 rxfer->inserted = insert;
3684
3685 /* ...and register it with spi_res/spi_message */
3686 spi_res_add(msg, rxfer);
3687
3688 return rxfer;
3689}
3690
3691static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3692 struct spi_message *msg,
3693 struct spi_transfer **xferp,
3694 size_t maxsize)
3695{
3696 struct spi_transfer *xfer = *xferp, *xfers;
3697 struct spi_replaced_transfers *srt;
3698 size_t offset;
3699 size_t count, i;
3700
3701 /* Calculate how many we have to replace */
3702 count = DIV_ROUND_UP(xfer->len, maxsize);
3703
3704 /* Create replacement */
3705 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
3706 if (IS_ERR(srt))
3707 return PTR_ERR(srt);
3708 xfers = srt->inserted_transfers;
3709
3710 /*
3711 * Now handle each of those newly inserted spi_transfers.
3712 * Note that the replacements spi_transfers all are preset
3713 * to the same values as *xferp, so tx_buf, rx_buf and len
3714 * are all identical (as well as most others)
3715 * so we just have to fix up len and the pointers.
3716 *
3717	 * This also includes support for the deprecated
3718 * spi_message.is_dma_mapped interface.
3719 */
3720
3721 /*
3722 * The first transfer just needs the length modified, so we
3723 * run it outside the loop.
3724 */
3725 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3726
3727 /* All the others need rx_buf/tx_buf also set */
3728 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3729 /* Update rx_buf, tx_buf and DMA */
3730 if (xfers[i].rx_buf)
3731 xfers[i].rx_buf += offset;
3732 if (xfers[i].rx_dma)
3733 xfers[i].rx_dma += offset;
3734 if (xfers[i].tx_buf)
3735 xfers[i].tx_buf += offset;
3736 if (xfers[i].tx_dma)
3737 xfers[i].tx_dma += offset;
3738
3739 /* Update length */
3740 xfers[i].len = min(maxsize, xfers[i].len - offset);
3741 }
3742
3743 /*
3744 * We set up xferp to the last entry we have inserted,
3745 * so that we skip those already split transfers.
3746 */
3747 *xferp = &xfers[count - 1];
3748
3749 /* Increment statistics counters */
3750 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3751 transfers_split_maxsize);
3752 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3753 transfers_split_maxsize);
3754
3755 return 0;
3756}
3757
3758/**
3759 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3760 * when an individual transfer exceeds a
3761 * certain size
3762 * @ctlr: the @spi_controller for this transfer
3763 * @msg: the @spi_message to transform
3764 * @maxsize: the maximum length, in bytes, above which a transfer is split
3765 *
3766 * This function allocates resources that are automatically freed during the
3767 * spi message unoptimize phase so this function should only be called from
3768 * optimize_message callbacks.
3769 *
3770 * Return: status of transformation
3771 */
3772int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3773 struct spi_message *msg,
3774 size_t maxsize)
3775{
3776 struct spi_transfer *xfer;
3777 int ret;
3778
3779 /*
3780 * Iterate over the transfer_list,
3781 * but note that xfer is advanced to the last transfer inserted
3782	 * to avoid checking sizes again unnecessarily (also, xfer may
3783	 * belong to a different list by the time the
3784 * replacement has happened).
3785 */
3786 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3787 if (xfer->len > maxsize) {
3788 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3789 maxsize);
3790 if (ret)
3791 return ret;
3792 }
3793 }
3794
3795 return 0;
3796}
3797EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
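
/*
 * Usage sketch: a controller whose DMA engine is limited to 64 KiB per
 * descriptor could call this from its optimize_message callback
 * (hypothetical names; SZ_64K comes from <linux/sizes.h>):
 *
 *	static int foo_optimize_message(struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(msg->spi->controller,
 *						   msg, SZ_64K);
 *	}
 */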
3798
3799
3800/**
3801 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3802 * when an individual transfer exceeds a
3803 * certain number of SPI words
3804 * @ctlr: the @spi_controller for this transfer
3805 * @msg: the @spi_message to transform
3806 * @maxwords: the number of words to limit each transfer to
3807 *
3808 * This function allocates resources that are automatically freed during the
3809 * spi message unoptimize phase so this function should only be called from
3810 * optimize_message callbacks.
3811 *
3812 * Return: status of transformation
3813 */
3814int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3815 struct spi_message *msg,
3816 size_t maxwords)
3817{
3818 struct spi_transfer *xfer;
3819
3820 /*
3821 * Iterate over the transfer_list,
3822 * but note that xfer is advanced to the last transfer inserted
3823	 * to avoid checking sizes again unnecessarily (also, xfer may
3824	 * belong to a different list by the time the
3825 * replacement has happened).
3826 */
3827 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3828 size_t maxsize;
3829 int ret;
3830
3831 maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3832 if (xfer->len > maxsize) {
3833 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3834 maxsize);
3835 if (ret)
3836 return ret;
3837 }
3838 }
3839
3840 return 0;
3841}
3842EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
3843
3844/*-------------------------------------------------------------------------*/
3845
3846/*
3847 * Core methods for SPI controller protocol drivers. Some of the
3848 * other core methods are currently defined as inline functions.
3849 */
3850
3851static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3852 u8 bits_per_word)
3853{
3854 if (ctlr->bits_per_word_mask) {
3855 /* Only 32 bits fit in the mask */
3856 if (bits_per_word > 32)
3857 return -EINVAL;
3858 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3859 return -EINVAL;
3860 }
3861
3862 return 0;
3863}
3864
3865/**
3866 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3867 * @spi: the device that requires specific CS timing configuration
3868 *
3869 * Return: zero on success, else a negative error code.
3870 */
3871static int spi_set_cs_timing(struct spi_device *spi)
3872{
3873 struct device *parent = spi->controller->dev.parent;
3874 int status = 0;
3875
3876 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3877 if (spi->controller->auto_runtime_pm) {
3878 status = pm_runtime_get_sync(parent);
3879 if (status < 0) {
3880 pm_runtime_put_noidle(parent);
3881 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3882 status);
3883 return status;
3884 }
3885
3886 status = spi->controller->set_cs_timing(spi);
3887 pm_runtime_mark_last_busy(parent);
3888 pm_runtime_put_autosuspend(parent);
3889 } else {
3890 status = spi->controller->set_cs_timing(spi);
3891 }
3892 }
3893 return status;
3894}
3895
3896/**
3897 * spi_setup - setup SPI mode and clock rate
3898 * @spi: the device whose settings are being modified
3899 * Context: can sleep, and no requests are queued to the device
3900 *
3901 * SPI protocol drivers may need to update the transfer mode if the
3902 * device doesn't work with its default. They may likewise need
3903 * to update clock rates or word sizes from initial values. This function
3904 * changes those settings, and must be called from a context that can sleep.
3905 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3906 * effect the next time the device is selected and data is transferred to
3907 * or from it. When this function returns, the SPI device is deselected.
3908 *
3909 * Note that this call will fail if the protocol driver specifies an option
3910 * that the underlying controller or its driver does not support. For
3911 * example, not all hardware supports wire transfers using nine bit words,
3912 * LSB-first wire encoding, or active-high chipselects.
3913 *
3914 * Return: zero on success, else a negative error code.
3915 */
3916int spi_setup(struct spi_device *spi)
3917{
3918 unsigned bad_bits, ugly_bits;
3919 int status = 0;
3920
3921 /*
3922	 * Check the mode to ensure that no two of DUAL, QUAD and NO_MOSI/MISO
3923 * are set at the same time.
3924 */
3925 if ((hweight_long(spi->mode &
3926 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3927 (hweight_long(spi->mode &
3928 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3929 dev_err(&spi->dev,
3930 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3931 return -EINVAL;
3932 }
3933 /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3934 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3935 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3936 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3937 return -EINVAL;
3938 /*
3939 * Help drivers fail *cleanly* when they need options
3940 * that aren't supported with their current controller.
3941 * SPI_CS_WORD has a fallback software implementation,
3942 * so it is ignored here.
3943 */
3944 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3945 SPI_NO_TX | SPI_NO_RX);
3946 ugly_bits = bad_bits &
3947 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3948 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3949 if (ugly_bits) {
3950 dev_warn(&spi->dev,
3951 "setup: ignoring unsupported mode bits %x\n",
3952 ugly_bits);
3953 spi->mode &= ~ugly_bits;
3954 bad_bits &= ~ugly_bits;
3955 }
3956 if (bad_bits) {
3957 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3958 bad_bits);
3959 return -EINVAL;
3960 }
3961
3962 if (!spi->bits_per_word) {
3963 spi->bits_per_word = 8;
3964 } else {
3965 /*
3966 * Some controllers may not support the default 8 bits-per-word
3967 * so only perform the check when this is explicitly provided.
3968 */
3969 status = __spi_validate_bits_per_word(spi->controller,
3970 spi->bits_per_word);
3971 if (status)
3972 return status;
3973 }
3974
3975 if (spi->controller->max_speed_hz &&
3976 (!spi->max_speed_hz ||
3977 spi->max_speed_hz > spi->controller->max_speed_hz))
3978 spi->max_speed_hz = spi->controller->max_speed_hz;
3979
3980 mutex_lock(&spi->controller->io_mutex);
3981
3982 if (spi->controller->setup) {
3983 status = spi->controller->setup(spi);
3984 if (status) {
3985 mutex_unlock(&spi->controller->io_mutex);
3986 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3987 status);
3988 return status;
3989 }
3990 }
3991
3992 status = spi_set_cs_timing(spi);
3993 if (status) {
3994 mutex_unlock(&spi->controller->io_mutex);
3995 return status;
3996 }
3997
	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
		if (status < 0) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
				status);
			return status;
		}

		/*
		 * We do not want to return a positive value here: many
		 * devices call spi_setup() and check for a non-zero return
		 * value instead of a negative return value.
		 */
		status = 0;

		spi_set_cs(spi, false, true);
		pm_runtime_mark_last_busy(spi->controller->dev.parent);
		pm_runtime_put_autosuspend(spi->controller->dev.parent);
	} else {
		spi_set_cs(spi, false, true);
	}

	mutex_unlock(&spi->controller->io_mutex);

	if (spi->rt && !spi->controller->rt) {
		spi->controller->rt = true;
		spi_set_thread_rt(spi->controller);
	}

	trace_spi_setup(spi, status);

	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		spi->mode & SPI_MODE_X_MASK,
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
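
/*
 * Usage sketch (illustrative only; the values below are hypothetical):
 * a protocol driver typically adjusts the fields it cares about in its
 * probe() and then calls spi_setup() before the first transfer:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 8;
 *	spi->max_speed_hz = 1000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */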

/* Make sure the transfer's word delay is at least as long as the device's */
static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
				       struct spi_device *spi)
{
	int delay1, delay2;

	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (delay1 < 0)
		return delay1;

	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
	if (delay2 < 0)
		return delay2;

	if (delay1 < delay2)
		memcpy(&xfer->word_delay, &spi->word_delay,
		       sizeof(xfer->word_delay));

	return 0;
}

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	message->spi = spi;

	/*
	 * Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed to the SPI device
	 * defaults if they are not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits to the single-transfer default
	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
	 * Ensure the transfer word_delay is at least as long as that
	 * required by the device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * The SPI transfer length must be a multiple of the SPI word
		 * size, where the word size is rounded up to a power-of-two
		 * number of bytes; e.g. a 12-bit word occupies two bytes, so
		 * a transfer using 12 bits-per-word must have an even length.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/*
		 * Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (spi->mode & SPI_NO_TX)
				return -EINVAL;
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* Check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (spi->mode & SPI_NO_RX)
				return -EINVAL;
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}

		if (_spi_xfer_word_delay_update(xfer, spi))
			return -EINVAL;
	}

	message->status = -EINPROGRESS;

	return 0;
}

/*
 * spi_split_transfers - generic handling of transfer splitting
 * @msg: the message to split
 *
 * Under certain conditions, a SPI controller may not support arbitrary
 * transfer sizes or other features required by a peripheral. This function
 * will split the transfers in the message into smaller transfers that are
 * supported by the controller.
 *
 * Controllers with special requirements not covered here can also split
 * transfers in the optimize_message() callback.
 *
 * Context: can sleep
 * Return: zero on success, else a negative error code
 */
static int spi_split_transfers(struct spi_message *msg)
{
	struct spi_controller *ctlr = msg->spi->controller;
	struct spi_transfer *xfer;
	int ret;

	/*
	 * If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((msg->spi->mode & SPI_CS_WORD) &&
	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			/* Don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &msg->transfers))
				break;

			xfer->cs_change = 1;
		}
	} else {
		ret = spi_split_transfers_maxsize(ctlr, msg,
						  spi_max_transfer_size(msg->spi));
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * __spi_optimize_message - shared implementation for spi_optimize_message()
 *			    and spi_maybe_optimize_message()
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 *
 * Peripheral drivers will call spi_optimize_message() and the spi core will
 * call spi_maybe_optimize_message() instead of calling this directly.
 *
 * It is not valid to call this on a message that has already been optimized.
 *
 * Return: zero on success, else a negative error code
 */
static int __spi_optimize_message(struct spi_device *spi,
				  struct spi_message *msg)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	ret = __spi_validate(spi, msg);
	if (ret)
		return ret;

	ret = spi_split_transfers(msg);
	if (ret)
		return ret;

	if (ctlr->optimize_message) {
		ret = ctlr->optimize_message(msg);
		if (ret) {
			spi_res_release(ctlr, msg);
			return ret;
		}
	}

	msg->optimized = true;

	return 0;
}

/*
 * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 * Return: zero on success, else a negative error code
 */
static int spi_maybe_optimize_message(struct spi_device *spi,
				      struct spi_message *msg)
{
	if (msg->pre_optimized)
		return 0;

	return __spi_optimize_message(spi, msg);
}

/**
 * spi_optimize_message - do any one-time validation and setup for a SPI message
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 *
 * Peripheral drivers that reuse the same message repeatedly may call this to
 * perform as much message prep as possible once, rather than repeating it each
 * time a message transfer is performed, improving throughput and reducing CPU
 * usage.
 *
 * Once a message has been optimized, it cannot be modified with the exception
 * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
 * only the data in the memory it points to).
 *
 * Calls to this function must be balanced with calls to spi_unoptimize_message()
 * to avoid leaking resources.
 *
 * Context: can sleep
 * Return: zero on success, else a negative error code
 */
int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
{
	int ret;

	ret = __spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	/*
	 * This flag indicates that the peripheral driver called
	 * spi_optimize_message(), so we shouldn't unoptimize the message
	 * automatically when finalizing it, but rather wait until
	 * spi_unoptimize_message() is called by the peripheral driver.
	 */
	msg->pre_optimized = true;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_optimize_message);

/**
 * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
 * @msg: the message to unoptimize
 *
 * Calls to this function must be balanced with calls to spi_optimize_message().
 *
 * Context: can sleep
 */
void spi_unoptimize_message(struct spi_message *msg)
{
	__spi_unoptimize_message(msg);
	msg->pre_optimized = false;
}
EXPORT_SYMBOL_GPL(spi_unoptimize_message);
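
/*
 * Usage sketch (hypothetical driver fields): a driver that resubmits the
 * same message many times hoists the one-time work out of the hot path.
 * Calls must be balanced:
 *
 *	ret = spi_optimize_message(spi, &priv->msg);	(once, e.g. in probe())
 *	...
 *	ret = spi_sync(spi, &priv->msg);		(per transfer)
 *	...
 *	spi_unoptimize_message(&priv->msg);		(once, e.g. in remove())
 */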

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers. Return
	 * -ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (IRQs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some devices might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = spi_maybe_optimize_message(spi, message);
	if (ret)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	spi_maybe_unoptimize_message(message);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
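
/*
 * Usage sketch (hypothetical completion handler and driver struct): the
 * message and its buffers must stay allocated until the callback runs,
 * and the callback executes in a context that can't sleep:
 *
 *	static void mydev_complete(void *context)
 *	{
 *		struct mydev *priv = context;
 *
 *		complete(&priv->done);	(priv->msg.status holds the result)
 *	}
 *	...
 *	priv->msg.complete = mydev_complete;
 *	priv->msg.context = priv;
 *	ret = spi_async(spi, &priv->msg);
 */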

static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
	bool was_busy;
	int ret;

	mutex_lock(&ctlr->io_mutex);

	was_busy = ctlr->busy;

	ctlr->cur_msg = msg;
	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	if (ret)
		dev_err(&ctlr->dev, "noqueue transfer failed\n");
	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	if (!was_busy) {
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
	}

	mutex_unlock(&ctlr->io_mutex);
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;
	int status;
	struct spi_controller *ctlr = spi->controller;

	if (__spi_check_suspended(ctlr)) {
		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
		return -ESHUTDOWN;
	}

	status = spi_maybe_optimize_message(spi, message);
	if (status)
		return status;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);

	/*
	 * Checking queue_empty here only guarantees async/sync message
	 * ordering when coming from the same context. It does not need to
	 * guard against reentrancy from a different context. The io_mutex
	 * will catch those cases.
	 */
	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
		message->actual_length = 0;
		message->status = -EINPROGRESS;

		trace_spi_message_submit(message);

		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);

		__spi_transfer_message_noqueue(ctlr, message);

		return message->status;
	}

	/*
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until it
	 * is completed.
	 */
	message->complete = spi_complete;
	message->context = &done;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	status = __spi_async(spi, message);
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->complete = NULL;
	message->context = NULL;

	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
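
/*
 * Usage sketch (illustrative, with hypothetical tx/rx/len variables): a
 * simple full-duplex exchange; kmalloc'd buffers are preferred so that
 * controllers which map buffers for DMA can do so safely:
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 */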

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* Mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
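
/*
 * Usage sketch: a driver that must issue several messages back to back,
 * with no other clients' traffic interleaved on the bus, brackets them
 * between spi_bus_lock() and spi_bus_unlock():
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */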

/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be DMA-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with DMA-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it's only a convenience), but we can keep heap
	 * costs out of the hot path unless someone else is using the
	 * pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the I/O */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
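
/*
 * Usage sketch (MYDEV_REG_ID is a hypothetical command byte): a common
 * use is reading a register behind a short command; both buffers may
 * live on the stack since they are copied through the bounce buffer:
 *
 *	u8 cmd = MYDEV_REG_ID;
 *	u8 id;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &id, 1);
 */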

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* The SPI controllers are not using spi_bus, so we must find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* Reference obtained in class_find_device() */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		/*
		 * Clear the flag before adding the device so that fw_devlink
		 * doesn't skip adding consumers to this device.
		 */
		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* No? not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}
EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);