v3.5.6
   1/*
   2 * SPI init/core code
   3 *
   4 * Copyright (C) 2005 David Brownell
   5 * Copyright (C) 2008 Secret Lab Technologies Ltd.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software
  19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  20 */
  21
  22#include <linux/kernel.h>
  23#include <linux/kmod.h>
  24#include <linux/device.h>
  25#include <linux/init.h>
  26#include <linux/cache.h>
  27#include <linux/mutex.h>
  28#include <linux/of_device.h>
  29#include <linux/of_irq.h>
  30#include <linux/slab.h>
  31#include <linux/mod_devicetable.h>
  32#include <linux/spi/spi.h>
  33#include <linux/pm_runtime.h>
  34#include <linux/export.h>
  35#include <linux/sched.h>
  36#include <linux/delay.h>
  37#include <linux/kthread.h>
  38
  39static void spidev_release(struct device *dev)
  40{
  41	struct spi_device	*spi = to_spi_device(dev);
  42
  43	/* spi masters may cleanup for released devices */
  44	if (spi->master->cleanup)
  45		spi->master->cleanup(spi);
  46
  47	spi_master_put(spi->master);
  48	kfree(spi);
  49}
  50
  51static ssize_t
  52modalias_show(struct device *dev, struct device_attribute *a, char *buf)
  53{
  54	const struct spi_device	*spi = to_spi_device(dev);
  55
  56	return sprintf(buf, "%s\n", spi->modalias);
  57}
  58
  59static struct device_attribute spi_dev_attrs[] = {
  60	__ATTR_RO(modalias),
  61	__ATTR_NULL,
  62};
  63
  64/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
  65 * and the sysfs version makes coldplug work too.
  66 */
  67
  68static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
  69						const struct spi_device *sdev)
  70{
  71	while (id->name[0]) {
  72		if (!strcmp(sdev->modalias, id->name))
  73			return id;
  74		id++;
  75	}
  76	return NULL;
  77}
  78
  79const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
  80{
  81	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
  82
  83	return spi_match_id(sdrv->id_table, sdev);
  84}
  85EXPORT_SYMBOL_GPL(spi_get_device_id);
  86
  87static int spi_match_device(struct device *dev, struct device_driver *drv)
  88{
  89	const struct spi_device	*spi = to_spi_device(dev);
  90	const struct spi_driver	*sdrv = to_spi_driver(drv);
  91
  92	/* Attempt an OF style match */
  93	if (of_driver_match_device(dev, drv))
  94		return 1;
  95
  96	if (sdrv->id_table)
  97		return !!spi_match_id(sdrv->id_table, spi);
  98
  99	return strcmp(spi->modalias, drv->name) == 0;
 100}
 101
 102static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
 103{
 104	const struct spi_device		*spi = to_spi_device(dev);
 105
 106	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
 107	return 0;
 108}
 109
 110#ifdef CONFIG_PM_SLEEP
 111static int spi_legacy_suspend(struct device *dev, pm_message_t message)
 112{
 113	int			value = 0;
 114	struct spi_driver	*drv = to_spi_driver(dev->driver);
 115
 116	/* suspend will stop irqs and dma; no more i/o */
 117	if (drv) {
 118		if (drv->suspend)
 119			value = drv->suspend(to_spi_device(dev), message);
 120		else
 121			dev_dbg(dev, "... can't suspend\n");
 122	}
 123	return value;
 124}
 125
 126static int spi_legacy_resume(struct device *dev)
 127{
 128	int			value = 0;
 129	struct spi_driver	*drv = to_spi_driver(dev->driver);
 130
 131	/* resume may restart the i/o queue */
 132	if (drv) {
 133		if (drv->resume)
 134			value = drv->resume(to_spi_device(dev));
 135		else
 136			dev_dbg(dev, "... can't resume\n");
 137	}
 138	return value;
 139}
 140
 141static int spi_pm_suspend(struct device *dev)
 142{
 143	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 144
 145	if (pm)
 146		return pm_generic_suspend(dev);
 147	else
 148		return spi_legacy_suspend(dev, PMSG_SUSPEND);
 149}
 150
 151static int spi_pm_resume(struct device *dev)
 152{
 153	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 154
 155	if (pm)
 156		return pm_generic_resume(dev);
 157	else
 158		return spi_legacy_resume(dev);
 159}
 160
 161static int spi_pm_freeze(struct device *dev)
 162{
 163	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 164
 165	if (pm)
 166		return pm_generic_freeze(dev);
 167	else
 168		return spi_legacy_suspend(dev, PMSG_FREEZE);
 169}
 170
 171static int spi_pm_thaw(struct device *dev)
 172{
 173	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 174
 175	if (pm)
 176		return pm_generic_thaw(dev);
 177	else
 178		return spi_legacy_resume(dev);
 179}
 180
 181static int spi_pm_poweroff(struct device *dev)
 182{
 183	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 184
 185	if (pm)
 186		return pm_generic_poweroff(dev);
 187	else
 188		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
 189}
 190
 191static int spi_pm_restore(struct device *dev)
 192{
 193	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 194
 195	if (pm)
 196		return pm_generic_restore(dev);
 197	else
 198		return spi_legacy_resume(dev);
 199}
 200#else
 201#define spi_pm_suspend	NULL
 202#define spi_pm_resume	NULL
 203#define spi_pm_freeze	NULL
 204#define spi_pm_thaw	NULL
 205#define spi_pm_poweroff	NULL
 206#define spi_pm_restore	NULL
 207#endif
 208
 209static const struct dev_pm_ops spi_pm = {
 210	.suspend = spi_pm_suspend,
 211	.resume = spi_pm_resume,
 212	.freeze = spi_pm_freeze,
 213	.thaw = spi_pm_thaw,
 214	.poweroff = spi_pm_poweroff,
 215	.restore = spi_pm_restore,
 216	SET_RUNTIME_PM_OPS(
 217		pm_generic_runtime_suspend,
 218		pm_generic_runtime_resume,
 219		pm_generic_runtime_idle
 220	)
 221};
 222
 223struct bus_type spi_bus_type = {
 224	.name		= "spi",
 225	.dev_attrs	= spi_dev_attrs,
 226	.match		= spi_match_device,
 227	.uevent		= spi_uevent,
 228	.pm		= &spi_pm,
 229};
 230EXPORT_SYMBOL_GPL(spi_bus_type);
 231
 232
 233static int spi_drv_probe(struct device *dev)
 234{
 235	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
 236
 237	return sdrv->probe(to_spi_device(dev));
 238}
 239
 240static int spi_drv_remove(struct device *dev)
 241{
 242	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
 243
 244	return sdrv->remove(to_spi_device(dev));
 245}
 246
 247static void spi_drv_shutdown(struct device *dev)
 248{
 249	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
 250
 251	sdrv->shutdown(to_spi_device(dev));
 252}
 253
 254/**
 255 * spi_register_driver - register a SPI driver
 256 * @sdrv: the driver to register
 257 * Context: can sleep
 258 */
 259int spi_register_driver(struct spi_driver *sdrv)
 260{
 261	sdrv->driver.bus = &spi_bus_type;
 262	if (sdrv->probe)
 263		sdrv->driver.probe = spi_drv_probe;
 264	if (sdrv->remove)
 265		sdrv->driver.remove = spi_drv_remove;
 266	if (sdrv->shutdown)
 267		sdrv->driver.shutdown = spi_drv_shutdown;
 268	return driver_register(&sdrv->driver);
 269}
 270EXPORT_SYMBOL_GPL(spi_register_driver);
 271
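/*
 * Usage sketch (illustrative, not part of this file): a minimal protocol
 * driver registering itself on the SPI bus.  The example_* names are
 * hypothetical; only the spi_driver fields and the register/unregister calls
 * come from the API above.  <linux/module.h> is assumed for module_init().
 */
static int example_probe(struct spi_device *spi)
{
	/* per-device setup would go here */
	return 0;
}

static int example_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
	.remove	= example_remove,	/* wrapped by spi_drv_remove() above */
};

static int __init example_driver_init(void)
{
	return spi_register_driver(&example_driver);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	spi_unregister_driver(&example_driver);
}
module_exit(example_driver_exit);
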
 272/*-------------------------------------------------------------------------*/
 273
 274/* SPI devices should normally not be created by SPI device drivers; that
 275 * would make them board-specific.  Similarly with SPI master drivers.
  276 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 277 * with other readonly (flashable) information about mainboard devices.
 278 */
 279
 280struct boardinfo {
 281	struct list_head	list;
 282	struct spi_board_info	board_info;
 283};
 284
 285static LIST_HEAD(board_list);
 286static LIST_HEAD(spi_master_list);
 287
 288/*
  289 * Used to protect add/del operations for the board_info list and
 290 * spi_master list, and their matching process
 291 */
 292static DEFINE_MUTEX(board_lock);
 293
 294/**
 295 * spi_alloc_device - Allocate a new SPI device
 296 * @master: Controller to which device is connected
 297 * Context: can sleep
 298 *
 299 * Allows a driver to allocate and initialize a spi_device without
 300 * registering it immediately.  This allows a driver to directly
 301 * fill the spi_device with device parameters before calling
 302 * spi_add_device() on it.
 303 *
 304 * Caller is responsible to call spi_add_device() on the returned
 305 * spi_device structure to add it to the SPI master.  If the caller
 306 * needs to discard the spi_device without adding it, then it should
 307 * call spi_dev_put() on it.
 308 *
 309 * Returns a pointer to the new device, or NULL.
 310 */
 311struct spi_device *spi_alloc_device(struct spi_master *master)
 312{
 313	struct spi_device	*spi;
 314	struct device		*dev = master->dev.parent;
 315
 316	if (!spi_master_get(master))
 317		return NULL;
 318
 319	spi = kzalloc(sizeof *spi, GFP_KERNEL);
 320	if (!spi) {
 321		dev_err(dev, "cannot alloc spi_device\n");
 322		spi_master_put(master);
 323		return NULL;
 324	}
 325
 326	spi->master = master;
 327	spi->dev.parent = &master->dev;
 328	spi->dev.bus = &spi_bus_type;
 329	spi->dev.release = spidev_release;
 330	device_initialize(&spi->dev);
 331	return spi;
 332}
 333EXPORT_SYMBOL_GPL(spi_alloc_device);
 334
 335/**
 336 * spi_add_device - Add spi_device allocated with spi_alloc_device
 337 * @spi: spi_device to register
 338 *
 339 * Companion function to spi_alloc_device.  Devices allocated with
 340 * spi_alloc_device can be added onto the spi bus with this function.
 341 *
 342 * Returns 0 on success; negative errno on failure
 343 */
 344int spi_add_device(struct spi_device *spi)
 345{
 346	static DEFINE_MUTEX(spi_add_lock);
 347	struct device *dev = spi->master->dev.parent;
 348	struct device *d;
 349	int status;
 350
 351	/* Chipselects are numbered 0..max; validate. */
 352	if (spi->chip_select >= spi->master->num_chipselect) {
 353		dev_err(dev, "cs%d >= max %d\n",
 354			spi->chip_select,
 355			spi->master->num_chipselect);
 356		return -EINVAL;
 357	}
 358
 359	/* Set the bus ID string */
 360	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
 361			spi->chip_select);
 362
 363
 364	/* We need to make sure there's no other device with this
 365	 * chipselect **BEFORE** we call setup(), else we'll trash
 366	 * its configuration.  Lock against concurrent add() calls.
 367	 */
 368	mutex_lock(&spi_add_lock);
 369
 370	d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
 371	if (d != NULL) {
 372		dev_err(dev, "chipselect %d already in use\n",
 373				spi->chip_select);
 374		put_device(d);
 375		status = -EBUSY;
 376		goto done;
 377	}
 378
 379	/* Drivers may modify this initial i/o setup, but will
 380	 * normally rely on the device being setup.  Devices
 381	 * using SPI_CS_HIGH can't coexist well otherwise...
 382	 */
 383	status = spi_setup(spi);
 384	if (status < 0) {
 385		dev_err(dev, "can't setup %s, status %d\n",
 386				dev_name(&spi->dev), status);
 387		goto done;
 388	}
 389
 390	/* Device may be bound to an active driver when this returns */
 391	status = device_add(&spi->dev);
 392	if (status < 0)
 393		dev_err(dev, "can't add %s, status %d\n",
 394				dev_name(&spi->dev), status);
 395	else
 396		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
 397
 398done:
 399	mutex_unlock(&spi_add_lock);
 400	return status;
 401}
 402EXPORT_SYMBOL_GPL(spi_add_device);
 403
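/*
 * Usage sketch (hypothetical adapter code): allocate a child device, fill in
 * its parameters, then add it.  On spi_add_device() failure the reference
 * taken by spi_alloc_device() is dropped with spi_dev_put(), as required by
 * the comments above.  Names and numbers are illustrative only.
 */
static struct spi_device *example_attach_chip(struct spi_master *master)
{
	struct spi_device *spi;

	spi = spi_alloc_device(master);
	if (!spi)
		return NULL;

	spi->chip_select = 0;
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_0;
	strlcpy(spi->modalias, "example-chip", sizeof(spi->modalias));

	if (spi_add_device(spi) < 0) {
		spi_dev_put(spi);
		return NULL;
	}
	return spi;
}
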
 404/**
 405 * spi_new_device - instantiate one new SPI device
 406 * @master: Controller to which device is connected
 407 * @chip: Describes the SPI device
 408 * Context: can sleep
 409 *
 410 * On typical mainboards, this is purely internal; and it's not needed
 411 * after board init creates the hard-wired devices.  Some development
 412 * platforms may not be able to use spi_register_board_info though, and
 413 * this is exported so that for example a USB or parport based adapter
 414 * driver could add devices (which it would learn about out-of-band).
 415 *
 416 * Returns the new device, or NULL.
 417 */
 418struct spi_device *spi_new_device(struct spi_master *master,
 419				  struct spi_board_info *chip)
 420{
 421	struct spi_device	*proxy;
 422	int			status;
 423
 424	/* NOTE:  caller did any chip->bus_num checks necessary.
 425	 *
 426	 * Also, unless we change the return value convention to use
 427	 * error-or-pointer (not NULL-or-pointer), troubleshootability
 428	 * suggests syslogged diagnostics are best here (ugh).
 429	 */
 430
 431	proxy = spi_alloc_device(master);
 432	if (!proxy)
 433		return NULL;
 434
 435	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
 436
 437	proxy->chip_select = chip->chip_select;
 438	proxy->max_speed_hz = chip->max_speed_hz;
 439	proxy->mode = chip->mode;
 440	proxy->irq = chip->irq;
 441	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
 442	proxy->dev.platform_data = (void *) chip->platform_data;
 443	proxy->controller_data = chip->controller_data;
 444	proxy->controller_state = NULL;
 445
 446	status = spi_add_device(proxy);
 447	if (status < 0) {
 448		spi_dev_put(proxy);
 449		return NULL;
 450	}
 451
 452	return proxy;
 453}
 454EXPORT_SYMBOL_GPL(spi_new_device);
 455
 456static void spi_match_master_to_boardinfo(struct spi_master *master,
 457				struct spi_board_info *bi)
 458{
 459	struct spi_device *dev;
 460
 461	if (master->bus_num != bi->bus_num)
 462		return;
 463
 464	dev = spi_new_device(master, bi);
 465	if (!dev)
 466		dev_err(master->dev.parent, "can't create new device for %s\n",
 467			bi->modalias);
 468}
 469
 470/**
 471 * spi_register_board_info - register SPI devices for a given board
 472 * @info: array of chip descriptors
 473 * @n: how many descriptors are provided
 474 * Context: can sleep
 475 *
 476 * Board-specific early init code calls this (probably during arch_initcall)
 477 * with segments of the SPI device table.  Any device nodes are created later,
 478 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 479 * this table of devices forever, so that reloading a controller driver will
 480 * not make Linux forget about these hard-wired devices.
 481 *
 482 * Other code can also call this, e.g. a particular add-on board might provide
 483 * SPI devices through its expansion connector, so code initializing that board
 484 * would naturally declare its SPI devices.
 485 *
 486 * The board info passed can safely be __initdata ... but be careful of
 487 * any embedded pointers (platform_data, etc), they're copied as-is.
 488 */
 489int __devinit
 490spi_register_board_info(struct spi_board_info const *info, unsigned n)
 491{
 492	struct boardinfo *bi;
 493	int i;
 494
 495	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
 496	if (!bi)
 497		return -ENOMEM;
 498
 499	for (i = 0; i < n; i++, bi++, info++) {
 500		struct spi_master *master;
 501
 502		memcpy(&bi->board_info, info, sizeof(*info));
 503		mutex_lock(&board_lock);
 504		list_add_tail(&bi->list, &board_list);
 505		list_for_each_entry(master, &spi_master_list, list)
 506			spi_match_master_to_boardinfo(master, &bi->board_info);
 507		mutex_unlock(&board_lock);
 508	}
 509
 510	return 0;
 511}
 512
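/*
 * Usage sketch (hypothetical board file): declare the board's hard-wired SPI
 * devices and hand them to the core during early init, as described above.
 * The chip name and numbers are illustrative only.
 */
static struct spi_board_info example_board_spi_devices[] __initdata = {
	{
		.modalias	= "example-flash",
		.max_speed_hz	= 20000000,
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};

static int __init example_board_spi_init(void)
{
	return spi_register_board_info(example_board_spi_devices,
				ARRAY_SIZE(example_board_spi_devices));
}
arch_initcall(example_board_spi_init);
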
 513/*-------------------------------------------------------------------------*/
 514
 515/**
 516 * spi_pump_messages - kthread work function which processes spi message queue
 517 * @work: pointer to kthread work struct contained in the master struct
 518 *
 519 * This function checks if there is any spi message in the queue that
  520 * needs processing and if so calls out to the driver to initialize hardware
 521 * and transfer each message.
 522 *
 523 */
 524static void spi_pump_messages(struct kthread_work *work)
 525{
 526	struct spi_master *master =
 527		container_of(work, struct spi_master, pump_messages);
 528	unsigned long flags;
 529	bool was_busy = false;
 530	int ret;
 531
 532	/* Lock queue and check for queue work */
 533	spin_lock_irqsave(&master->queue_lock, flags);
 534	if (list_empty(&master->queue) || !master->running) {
 535		if (master->busy && master->unprepare_transfer_hardware) {
 536			ret = master->unprepare_transfer_hardware(master);
 537			if (ret) {
 538				spin_unlock_irqrestore(&master->queue_lock, flags);
 539				dev_err(&master->dev,
 540					"failed to unprepare transfer hardware\n");
 541				return;
 542			}
 543		}
 544		master->busy = false;
 545		spin_unlock_irqrestore(&master->queue_lock, flags);
 546		return;
 547	}
 548
 549	/* Make sure we are not already running a message */
 550	if (master->cur_msg) {
 551		spin_unlock_irqrestore(&master->queue_lock, flags);
 552		return;
 553	}
 554	/* Extract head of queue */
 555	master->cur_msg =
 556	    list_entry(master->queue.next, struct spi_message, queue);
 557
 558	list_del_init(&master->cur_msg->queue);
 559	if (master->busy)
 560		was_busy = true;
 561	else
 562		master->busy = true;
 563	spin_unlock_irqrestore(&master->queue_lock, flags);
 564
 565	if (!was_busy && master->prepare_transfer_hardware) {
 566		ret = master->prepare_transfer_hardware(master);
 567		if (ret) {
 568			dev_err(&master->dev,
 569				"failed to prepare transfer hardware\n");
 570			return;
 571		}
 572	}
 573
 574	ret = master->transfer_one_message(master, master->cur_msg);
 575	if (ret) {
 576		dev_err(&master->dev,
 577			"failed to transfer one message from queue\n");
 578		return;
 579	}
 580}
 581
 582static int spi_init_queue(struct spi_master *master)
 583{
 584	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 585
 586	INIT_LIST_HEAD(&master->queue);
 587	spin_lock_init(&master->queue_lock);
 588
 589	master->running = false;
 590	master->busy = false;
 591
 592	init_kthread_worker(&master->kworker);
 593	master->kworker_task = kthread_run(kthread_worker_fn,
 594					   &master->kworker,
 595					   dev_name(&master->dev));
 596	if (IS_ERR(master->kworker_task)) {
 597		dev_err(&master->dev, "failed to create message pump task\n");
 598		return -ENOMEM;
 599	}
 600	init_kthread_work(&master->pump_messages, spi_pump_messages);
 601
 602	/*
 603	 * Master config will indicate if this controller should run the
 604	 * message pump with high (realtime) priority to reduce the transfer
 605	 * latency on the bus by minimising the delay between a transfer
 606	 * request and the scheduling of the message pump thread. Without this
 607	 * setting the message pump thread will remain at default priority.
 608	 */
 609	if (master->rt) {
 610		dev_info(&master->dev,
 611			"will run message pump with realtime priority\n");
 612		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
 613	}
 614
 615	return 0;
 616}
 617
 618/**
 619 * spi_get_next_queued_message() - called by driver to check for queued
 620 * messages
 621 * @master: the master to check for queued messages
 622 *
 623 * If there are more messages in the queue, the next message is returned from
 624 * this call.
 625 */
 626struct spi_message *spi_get_next_queued_message(struct spi_master *master)
 627{
 628	struct spi_message *next;
 629	unsigned long flags;
 630
 631	/* get a pointer to the next message, if any */
 632	spin_lock_irqsave(&master->queue_lock, flags);
 633	if (list_empty(&master->queue))
 634		next = NULL;
 635	else
 636		next = list_entry(master->queue.next,
 637				  struct spi_message, queue);
 638	spin_unlock_irqrestore(&master->queue_lock, flags);
 639
 640	return next;
 641}
 642EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
 643
 644/**
 645 * spi_finalize_current_message() - the current message is complete
 646 * @master: the master to return the message to
 647 *
 648 * Called by the driver to notify the core that the message in the front of the
 649 * queue is complete and can be removed from the queue.
 650 */
 651void spi_finalize_current_message(struct spi_master *master)
 652{
 653	struct spi_message *mesg;
 654	unsigned long flags;
 655
 656	spin_lock_irqsave(&master->queue_lock, flags);
 657	mesg = master->cur_msg;
 658	master->cur_msg = NULL;
 659
 660	queue_kthread_work(&master->kworker, &master->pump_messages);
 661	spin_unlock_irqrestore(&master->queue_lock, flags);
 662
 663	mesg->state = NULL;
 664	if (mesg->complete)
 665		mesg->complete(mesg->context);
 666}
 667EXPORT_SYMBOL_GPL(spi_finalize_current_message);
 668
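/*
 * Usage sketch (hypothetical controller driver): the message pump above calls
 * ->transfer_one_message(); the driver performs the I/O, fills in the message
 * status and length, then hands the message back with
 * spi_finalize_current_message().  example_do_transfer() is an assumed helper.
 */
static int example_transfer_one_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = example_do_transfer(master, msg->spi, xfer);
		if (ret)
			break;
		msg->actual_length += xfer->len;
	}

	msg->status = ret;
	spi_finalize_current_message(master);
	return ret;
}
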
 669static int spi_start_queue(struct spi_master *master)
 670{
 671	unsigned long flags;
 672
 673	spin_lock_irqsave(&master->queue_lock, flags);
 674
 675	if (master->running || master->busy) {
 676		spin_unlock_irqrestore(&master->queue_lock, flags);
 677		return -EBUSY;
 678	}
 679
 680	master->running = true;
 681	master->cur_msg = NULL;
 682	spin_unlock_irqrestore(&master->queue_lock, flags);
 683
 684	queue_kthread_work(&master->kworker, &master->pump_messages);
 685
 686	return 0;
 687}
 688
 689static int spi_stop_queue(struct spi_master *master)
 690{
 691	unsigned long flags;
 692	unsigned limit = 500;
 693	int ret = 0;
 694
 695	spin_lock_irqsave(&master->queue_lock, flags);
 696
 697	/*
 698	 * This is a bit lame, but is optimized for the common execution path.
 699	 * A wait_queue on the master->busy could be used, but then the common
 700	 * execution path (pump_messages) would be required to call wake_up or
 701	 * friends on every SPI message. Do this instead.
 702	 */
 703	while ((!list_empty(&master->queue) || master->busy) && limit--) {
 704		spin_unlock_irqrestore(&master->queue_lock, flags);
 705		msleep(10);
 706		spin_lock_irqsave(&master->queue_lock, flags);
 707	}
 708
 709	if (!list_empty(&master->queue) || master->busy)
 710		ret = -EBUSY;
 711	else
 712		master->running = false;
 713
 714	spin_unlock_irqrestore(&master->queue_lock, flags);
 715
 716	if (ret) {
 717		dev_warn(&master->dev,
 718			 "could not stop message queue\n");
 719		return ret;
 720	}
 721	return ret;
 722}
 723
 724static int spi_destroy_queue(struct spi_master *master)
 725{
 726	int ret;
 727
 728	ret = spi_stop_queue(master);
 729
 730	/*
 731	 * flush_kthread_worker will block until all work is done.
 732	 * If the reason that stop_queue timed out is that the work will never
 733	 * finish, then it does no good to call flush/stop thread, so
 734	 * return anyway.
 735	 */
 736	if (ret) {
 737		dev_err(&master->dev, "problem destroying queue\n");
 738		return ret;
 739	}
 740
 741	flush_kthread_worker(&master->kworker);
 742	kthread_stop(master->kworker_task);
 743
 744	return 0;
 745}
 746
 747/**
 748 * spi_queued_transfer - transfer function for queued transfers
 749 * @spi: spi device which is requesting transfer
  750 * @msg: spi message which is to be handled and queued onto the driver queue
 751 */
 752static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
 753{
 754	struct spi_master *master = spi->master;
 755	unsigned long flags;
 756
 757	spin_lock_irqsave(&master->queue_lock, flags);
 758
 759	if (!master->running) {
 760		spin_unlock_irqrestore(&master->queue_lock, flags);
 761		return -ESHUTDOWN;
 762	}
 763	msg->actual_length = 0;
 764	msg->status = -EINPROGRESS;
 765
 766	list_add_tail(&msg->queue, &master->queue);
 767	if (master->running && !master->busy)
 768		queue_kthread_work(&master->kworker, &master->pump_messages);
 769
 770	spin_unlock_irqrestore(&master->queue_lock, flags);
 771	return 0;
 772}
 773
 774static int spi_master_initialize_queue(struct spi_master *master)
 775{
 776	int ret;
 777
 778	master->queued = true;
 779	master->transfer = spi_queued_transfer;
 780
 781	/* Initialize and start queue */
 782	ret = spi_init_queue(master);
 783	if (ret) {
 784		dev_err(&master->dev, "problem initializing queue\n");
 785		goto err_init_queue;
 786	}
 787	ret = spi_start_queue(master);
 788	if (ret) {
 789		dev_err(&master->dev, "problem starting queue\n");
 790		goto err_start_queue;
 791	}
 792
 793	return 0;
 794
 795err_start_queue:
 796err_init_queue:
 797	spi_destroy_queue(master);
 798	return ret;
 799}
 800
 801/*-------------------------------------------------------------------------*/
 802
 803#if defined(CONFIG_OF) && !defined(CONFIG_SPARC)
 804/**
 805 * of_register_spi_devices() - Register child devices onto the SPI bus
 806 * @master:	Pointer to spi_master device
 807 *
 808 * Registers an spi_device for each child node of master node which has a 'reg'
 809 * property.
 810 */
 811static void of_register_spi_devices(struct spi_master *master)
 812{
 813	struct spi_device *spi;
 814	struct device_node *nc;
 815	const __be32 *prop;
 816	int rc;
 817	int len;
 818
 819	if (!master->dev.of_node)
 820		return;
 821
 822	for_each_child_of_node(master->dev.of_node, nc) {
 823		/* Alloc an spi_device */
 824		spi = spi_alloc_device(master);
 825		if (!spi) {
 826			dev_err(&master->dev, "spi_device alloc error for %s\n",
 827				nc->full_name);
 828			spi_dev_put(spi);
 829			continue;
 830		}
 831
 832		/* Select device driver */
 833		if (of_modalias_node(nc, spi->modalias,
 834				     sizeof(spi->modalias)) < 0) {
 835			dev_err(&master->dev, "cannot find modalias for %s\n",
 836				nc->full_name);
 837			spi_dev_put(spi);
 838			continue;
 839		}
 840
 841		/* Device address */
 842		prop = of_get_property(nc, "reg", &len);
 843		if (!prop || len < sizeof(*prop)) {
 844			dev_err(&master->dev, "%s has no 'reg' property\n",
 845				nc->full_name);
 846			spi_dev_put(spi);
 847			continue;
 848		}
 849		spi->chip_select = be32_to_cpup(prop);
 850
 851		/* Mode (clock phase/polarity/etc.) */
 852		if (of_find_property(nc, "spi-cpha", NULL))
 853			spi->mode |= SPI_CPHA;
 854		if (of_find_property(nc, "spi-cpol", NULL))
 855			spi->mode |= SPI_CPOL;
 856		if (of_find_property(nc, "spi-cs-high", NULL))
 857			spi->mode |= SPI_CS_HIGH;
 858
 859		/* Device speed */
 860		prop = of_get_property(nc, "spi-max-frequency", &len);
 861		if (!prop || len < sizeof(*prop)) {
 862			dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
 863				nc->full_name);
 864			spi_dev_put(spi);
 865			continue;
 866		}
 867		spi->max_speed_hz = be32_to_cpup(prop);
 868
 869		/* IRQ */
 870		spi->irq = irq_of_parse_and_map(nc, 0);
 871
 872		/* Store a pointer to the node in the device structure */
 873		of_node_get(nc);
 874		spi->dev.of_node = nc;
 875
 876		/* Register the new device */
 877		request_module(spi->modalias);
 878		rc = spi_add_device(spi);
 879		if (rc) {
 880			dev_err(&master->dev, "spi_device register error %s\n",
 881				nc->full_name);
 882			spi_dev_put(spi);
 883		}
 884
 885	}
 886}
 887#else
 888static void of_register_spi_devices(struct spi_master *master) { }
 889#endif
 890
 891static void spi_master_release(struct device *dev)
 892{
 893	struct spi_master *master;
 894
 895	master = container_of(dev, struct spi_master, dev);
 896	kfree(master);
 897}
 898
 899static struct class spi_master_class = {
 900	.name		= "spi_master",
 901	.owner		= THIS_MODULE,
 902	.dev_release	= spi_master_release,
 903};
 904
 905
 906
 907/**
 908 * spi_alloc_master - allocate SPI master controller
 909 * @dev: the controller, possibly using the platform_bus
 910 * @size: how much zeroed driver-private data to allocate; the pointer to this
 911 *	memory is in the driver_data field of the returned device,
 912 *	accessible with spi_master_get_devdata().
 913 * Context: can sleep
 914 *
 915 * This call is used only by SPI master controller drivers, which are the
 916 * only ones directly touching chip registers.  It's how they allocate
 917 * an spi_master structure, prior to calling spi_register_master().
 918 *
 919 * This must be called from context that can sleep.  It returns the SPI
 920 * master structure on success, else NULL.
 921 *
 922 * The caller is responsible for assigning the bus number and initializing
 923 * the master's methods before calling spi_register_master(); and (after errors
 924 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 925 * leak.
 926 */
 927struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
 928{
 929	struct spi_master	*master;
 930
 931	if (!dev)
 932		return NULL;
 933
 934	master = kzalloc(size + sizeof *master, GFP_KERNEL);
 935	if (!master)
 936		return NULL;
 937
 938	device_initialize(&master->dev);
 939	master->bus_num = -1;
 940	master->num_chipselect = 1;
 941	master->dev.class = &spi_master_class;
 942	master->dev.parent = get_device(dev);
 943	spi_master_set_devdata(master, &master[1]);
 944
 945	return master;
 946}
 947EXPORT_SYMBOL_GPL(spi_alloc_master);
 948
 949/**
 950 * spi_register_master - register SPI master controller
 951 * @master: initialized master, originally from spi_alloc_master()
 952 * Context: can sleep
 953 *
 954 * SPI master controllers connect to their drivers using some non-SPI bus,
 955 * such as the platform bus.  The final stage of probe() in that code
 956 * includes calling spi_register_master() to hook up to this SPI bus glue.
 957 *
 958 * SPI controllers use board specific (often SOC specific) bus numbers,
 959 * and board-specific addressing for SPI devices combines those numbers
 960 * with chip select numbers.  Since SPI does not directly support dynamic
 961 * device identification, boards need configuration tables telling which
 962 * chip is at which address.
 963 *
 964 * This must be called from context that can sleep.  It returns zero on
 965 * success, else a negative error code (dropping the master's refcount).
 966 * After a successful return, the caller is responsible for calling
 967 * spi_unregister_master().
 968 */
 969int spi_register_master(struct spi_master *master)
 970{
 971	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
 972	struct device		*dev = master->dev.parent;
 973	struct boardinfo	*bi;
 974	int			status = -ENODEV;
 975	int			dynamic = 0;
 976
 977	if (!dev)
 978		return -ENODEV;
 979
 980	/* even if it's just one always-selected device, there must
 981	 * be at least one chipselect
 982	 */
 983	if (master->num_chipselect == 0)
 984		return -EINVAL;
 985
 986	/* convention:  dynamically assigned bus IDs count down from the max */
 987	if (master->bus_num < 0) {
 988		/* FIXME switch to an IDR based scheme, something like
 989		 * I2C now uses, so we can't run out of "dynamic" IDs
 990		 */
 991		master->bus_num = atomic_dec_return(&dyn_bus_id);
 992		dynamic = 1;
 993	}
 994
 995	spin_lock_init(&master->bus_lock_spinlock);
 996	mutex_init(&master->bus_lock_mutex);
 997	master->bus_lock_flag = 0;
 998
 999	/* register the device, then userspace will see it.
1000	 * registration fails if the bus ID is in use.
1001	 */
1002	dev_set_name(&master->dev, "spi%u", master->bus_num);
1003	status = device_add(&master->dev);
1004	if (status < 0)
1005		goto done;
1006	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1007			dynamic ? " (dynamic)" : "");
1008
1009	/* If we're using a queued driver, start the queue */
1010	if (master->transfer)
1011		dev_info(dev, "master is unqueued, this is deprecated\n");
1012	else {
1013		status = spi_master_initialize_queue(master);
1014		if (status) {
1015			device_unregister(&master->dev);
1016			goto done;
1017		}
1018	}
1019
1020	mutex_lock(&board_lock);
1021	list_add_tail(&master->list, &spi_master_list);
1022	list_for_each_entry(bi, &board_list, list)
1023		spi_match_master_to_boardinfo(master, &bi->board_info);
1024	mutex_unlock(&board_lock);
1025
1026	/* Register devices from the device tree */
1027	of_register_spi_devices(master);
1028done:
1029	return status;
1030}
1031EXPORT_SYMBOL_GPL(spi_register_master);
1032
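/*
 * Usage sketch (hypothetical platform controller driver): allocate a master,
 * fill in the controller methods described above, then register it.  The
 * example_* types and callbacks are assumptions, and <linux/platform_device.h>
 * would be needed for the probe signature.
 */
static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(struct example_spi_priv));
	if (!master)
		return -ENOMEM;

	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->setup = example_spi_setup;			/* assumed */
	master->transfer_one_message = example_transfer_one_message;
	master->prepare_transfer_hardware = example_prepare_hw;	/* assumed */

	ret = spi_register_master(master);
	if (ret)
		spi_master_put(master);	/* drop the allocation reference */
	return ret;
}
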
1033static int __unregister(struct device *dev, void *null)
1034{
1035	spi_unregister_device(to_spi_device(dev));
1036	return 0;
1037}
1038
1039/**
1040 * spi_unregister_master - unregister SPI master controller
1041 * @master: the master being unregistered
1042 * Context: can sleep
1043 *
1044 * This call is used only by SPI master controller drivers, which are the
1045 * only ones directly touching chip registers.
1046 *
1047 * This must be called from context that can sleep.
1048 */
1049void spi_unregister_master(struct spi_master *master)
1050{
1051	int dummy;
1052
1053	if (master->queued) {
1054		if (spi_destroy_queue(master))
1055			dev_err(&master->dev, "queue remove failed\n");
1056	}
1057
1058	mutex_lock(&board_lock);
1059	list_del(&master->list);
1060	mutex_unlock(&board_lock);
1061
1062	dummy = device_for_each_child(&master->dev, NULL, __unregister);
1063	device_unregister(&master->dev);
1064}
1065EXPORT_SYMBOL_GPL(spi_unregister_master);
1066
1067int spi_master_suspend(struct spi_master *master)
1068{
1069	int ret;
1070
1071	/* Basically no-ops for non-queued masters */
1072	if (!master->queued)
1073		return 0;
1074
1075	ret = spi_stop_queue(master);
1076	if (ret)
1077		dev_err(&master->dev, "queue stop failed\n");
1078
1079	return ret;
1080}
1081EXPORT_SYMBOL_GPL(spi_master_suspend);
1082
1083int spi_master_resume(struct spi_master *master)
1084{
1085	int ret;
1086
1087	if (!master->queued)
1088		return 0;
1089
1090	ret = spi_start_queue(master);
1091	if (ret)
1092		dev_err(&master->dev, "queue restart failed\n");
1093
1094	return ret;
1095}
1096EXPORT_SYMBOL_GPL(spi_master_resume);
1097
1098static int __spi_master_match(struct device *dev, void *data)
1099{
1100	struct spi_master *m;
1101	u16 *bus_num = data;
1102
1103	m = container_of(dev, struct spi_master, dev);
1104	return m->bus_num == *bus_num;
1105}
1106
1107/**
1108 * spi_busnum_to_master - look up master associated with bus_num
1109 * @bus_num: the master's bus number
1110 * Context: can sleep
1111 *
1112 * This call may be used with devices that are registered after
1113 * arch init time.  It returns a refcounted pointer to the relevant
1114 * spi_master (which the caller must release), or NULL if there is
1115 * no such master registered.
1116 */
1117struct spi_master *spi_busnum_to_master(u16 bus_num)
1118{
1119	struct device		*dev;
1120	struct spi_master	*master = NULL;
1121
1122	dev = class_find_device(&spi_master_class, NULL, &bus_num,
1123				__spi_master_match);
1124	if (dev)
1125		master = container_of(dev, struct spi_master, dev);
1126	/* reference got in class_find_device */
1127	return master;
1128}
1129EXPORT_SYMBOL_GPL(spi_busnum_to_master);
1130
1131
1132/*-------------------------------------------------------------------------*/
1133
1134/* Core methods for SPI master protocol drivers.  Some of the
1135 * other core methods are currently defined as inline functions.
1136 */
1137
1138/**
1139 * spi_setup - setup SPI mode and clock rate
1140 * @spi: the device whose settings are being modified
1141 * Context: can sleep, and no requests are queued to the device
1142 *
1143 * SPI protocol drivers may need to update the transfer mode if the
1144 * device doesn't work with its default.  They may likewise need
1145 * to update clock rates or word sizes from initial values.  This function
1146 * changes those settings, and must be called from a context that can sleep.
1147 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
1148 * effect the next time the device is selected and data is transferred to
1149 * or from it.  When this function returns, the spi device is deselected.
1150 *
1151 * Note that this call will fail if the protocol driver specifies an option
1152 * that the underlying controller or its driver does not support.  For
1153 * example, not all hardware supports wire transfers using nine bit words,
1154 * LSB-first wire encoding, or active-high chipselects.
1155 */
1156int spi_setup(struct spi_device *spi)
1157{
1158	unsigned	bad_bits;
1159	int		status;
1160
1161	/* help drivers fail *cleanly* when they need options
1162	 * that aren't supported with their current master
1163	 */
1164	bad_bits = spi->mode & ~spi->master->mode_bits;
1165	if (bad_bits) {
1166		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
1167			bad_bits);
1168		return -EINVAL;
1169	}
1170
1171	if (!spi->bits_per_word)
1172		spi->bits_per_word = 8;
1173
1174	status = spi->master->setup(spi);
1175
1176	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
1177				"%u bits/w, %u Hz max --> %d\n",
1178			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
1179			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
1180			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
1181			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
1182			(spi->mode & SPI_LOOP) ? "loopback, " : "",
1183			spi->bits_per_word, spi->max_speed_hz,
1184			status);
1185
1186	return status;
1187}
1188EXPORT_SYMBOL_GPL(spi_setup);
1189
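/*
 * Usage sketch (hypothetical protocol driver probe): change word size, mode
 * and clock before the first transfer.  The numbers are illustrative only.
 */
static int example_configure(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;		/* CPOL=1, CPHA=1 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 2000000;

	return spi_setup(spi);	/* fails if the master lacks these mode bits */
}
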
1190static int __spi_async(struct spi_device *spi, struct spi_message *message)
1191{
1192	struct spi_master *master = spi->master;
1193
1194	/* Half-duplex links include original MicroWire, and ones with
1195	 * only one data pin like SPI_3WIRE (switches direction) or where
1196	 * either MOSI or MISO is missing.  They can also be caused by
1197	 * software limitations.
1198	 */
1199	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
1200			|| (spi->mode & SPI_3WIRE)) {
1201		struct spi_transfer *xfer;
1202		unsigned flags = master->flags;
1203
1204		list_for_each_entry(xfer, &message->transfers, transfer_list) {
1205			if (xfer->rx_buf && xfer->tx_buf)
1206				return -EINVAL;
1207			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
1208				return -EINVAL;
1209			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
1210				return -EINVAL;
1211		}
1212	}
1213
1214	message->spi = spi;
1215	message->status = -EINPROGRESS;
1216	return master->transfer(spi, message);
1217}
1218
1219/**
1220 * spi_async - asynchronous SPI transfer
1221 * @spi: device with which data will be exchanged
1222 * @message: describes the data transfers, including completion callback
1223 * Context: any (irqs may be blocked, etc)
1224 *
1225 * This call may be used in_irq and other contexts which can't sleep,
1226 * as well as from task contexts which can sleep.
1227 *
1228 * The completion callback is invoked in a context which can't sleep.
1229 * Before that invocation, the value of message->status is undefined.
1230 * When the callback is issued, message->status holds either zero (to
1231 * indicate complete success) or a negative error code.  After that
1232 * callback returns, the driver which issued the transfer request may
1233 * deallocate the associated memory; it's no longer in use by any SPI
1234 * core or controller driver code.
1235 *
1236 * Note that although all messages to a spi_device are handled in
1237 * FIFO order, messages may go to different devices in other orders.
1238 * Some device might be higher priority, or have various "hard" access
1239 * time requirements, for example.
1240 *
1241 * On detection of any fault during the transfer, processing of
1242 * the entire message is aborted, and the device is deselected.
1243 * Until returning from the associated message completion callback,
1244 * no other spi_message queued to that device will be processed.
1245 * (This rule applies equally to all the synchronous transfer calls,
1246 * which are wrappers around this core asynchronous primitive.)
1247 */
1248int spi_async(struct spi_device *spi, struct spi_message *message)
1249{
1250	struct spi_master *master = spi->master;
1251	int ret;
1252	unsigned long flags;
1253
1254	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1255
1256	if (master->bus_lock_flag)
1257		ret = -EBUSY;
1258	else
1259		ret = __spi_async(spi, message);
1260
1261	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1262
1263	return ret;
1264}
1265EXPORT_SYMBOL_GPL(spi_async);
1266
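/*
 * Usage sketch (hypothetical, usable from atomic context): a fire-and-forget
 * write with a completion callback.  The request and its buffer stay
 * allocated until the callback runs, as documented above.
 */
struct example_async_req {
	struct spi_message	msg;
	struct spi_transfer	xfer;
	u8			buf[4];
};

static void example_async_complete(void *context)
{
	struct example_async_req *req = context;

	/* can't sleep here; req->msg.status holds the result */
	kfree(req);
}

static int example_async_write(struct spi_device *spi, const u8 *data,
			       size_t len)
{
	struct example_async_req *req;
	int ret;

	if (len > sizeof(req->buf))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	memcpy(req->buf, data, len);
	spi_message_init(&req->msg);
	req->xfer.tx_buf = req->buf;
	req->xfer.len = len;
	spi_message_add_tail(&req->xfer, &req->msg);
	req->msg.complete = example_async_complete;
	req->msg.context = req;

	ret = spi_async(spi, &req->msg);
	if (ret)
		kfree(req);
	return ret;
}
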
1267/**
1268 * spi_async_locked - version of spi_async with exclusive bus usage
1269 * @spi: device with which data will be exchanged
1270 * @message: describes the data transfers, including completion callback
1271 * Context: any (irqs may be blocked, etc)
1272 *
1273 * This call may be used in_irq and other contexts which can't sleep,
1274 * as well as from task contexts which can sleep.
1275 *
1276 * The completion callback is invoked in a context which can't sleep.
1277 * Before that invocation, the value of message->status is undefined.
1278 * When the callback is issued, message->status holds either zero (to
1279 * indicate complete success) or a negative error code.  After that
1280 * callback returns, the driver which issued the transfer request may
1281 * deallocate the associated memory; it's no longer in use by any SPI
1282 * core or controller driver code.
1283 *
1284 * Note that although all messages to a spi_device are handled in
1285 * FIFO order, messages may go to different devices in other orders.
1286 * Some device might be higher priority, or have various "hard" access
1287 * time requirements, for example.
1288 *
1289 * On detection of any fault during the transfer, processing of
1290 * the entire message is aborted, and the device is deselected.
1291 * Until returning from the associated message completion callback,
1292 * no other spi_message queued to that device will be processed.
1293 * (This rule applies equally to all the synchronous transfer calls,
1294 * which are wrappers around this core asynchronous primitive.)
1295 */
1296int spi_async_locked(struct spi_device *spi, struct spi_message *message)
1297{
1298	struct spi_master *master = spi->master;
1299	int ret;
1300	unsigned long flags;
1301
1302	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1303
1304	ret = __spi_async(spi, message);
1305
1306	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1307
1308	return ret;
1309
1310}
1311EXPORT_SYMBOL_GPL(spi_async_locked);
1312
1313
1314/*-------------------------------------------------------------------------*/
1315
1316/* Utility methods for SPI master protocol drivers, layered on
1317 * top of the core.  Some other utility methods are defined as
1318 * inline functions.
1319 */
1320
1321static void spi_complete(void *arg)
1322{
1323	complete(arg);
1324}
1325
1326static int __spi_sync(struct spi_device *spi, struct spi_message *message,
1327		      int bus_locked)
1328{
1329	DECLARE_COMPLETION_ONSTACK(done);
1330	int status;
1331	struct spi_master *master = spi->master;
1332
1333	message->complete = spi_complete;
1334	message->context = &done;
1335
1336	if (!bus_locked)
1337		mutex_lock(&master->bus_lock_mutex);
1338
1339	status = spi_async_locked(spi, message);
1340
1341	if (!bus_locked)
1342		mutex_unlock(&master->bus_lock_mutex);
1343
1344	if (status == 0) {
1345		wait_for_completion(&done);
1346		status = message->status;
1347	}
1348	message->context = NULL;
1349	return status;
1350}
1351
1352/**
1353 * spi_sync - blocking/synchronous SPI data transfers
1354 * @spi: device with which data will be exchanged
1355 * @message: describes the data transfers
1356 * Context: can sleep
1357 *
1358 * This call may only be used from a context that may sleep.  The sleep
1359 * is non-interruptible, and has no timeout.  Low-overhead controller
1360 * drivers may DMA directly into and out of the message buffers.
1361 *
1362 * Note that the SPI device's chip select is active during the message,
1363 * and then is normally disabled between messages.  Drivers for some
1364 * frequently-used devices may want to minimize costs of selecting a chip,
1365 * by leaving it selected in anticipation that the next message will go
1366 * to the same chip.  (That may increase power usage.)
1367 *
1368 * Also, the caller is guaranteeing that the memory associated with the
1369 * message will not be freed before this call returns.
1370 *
1371 * It returns zero on success, else a negative error code.
1372 */
1373int spi_sync(struct spi_device *spi, struct spi_message *message)
1374{
1375	return __spi_sync(spi, message, 0);
1376}
1377EXPORT_SYMBOL_GPL(spi_sync);
1378
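/*
 * Usage sketch (hypothetical, sleeping context): one full-duplex transfer.
 * Message and transfer bookkeeping may live on the stack; the data buffers
 * themselves should normally be DMA-safe (heap or static), since low-overhead
 * controller drivers may DMA directly into and out of them, as noted above.
 */
static int example_full_duplex(struct spi_device *spi, void *tx, void *rx,
			       size_t len)
{
	struct spi_transfer	t = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);
}
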
1379/**
1380 * spi_sync_locked - version of spi_sync with exclusive bus usage
1381 * @spi: device with which data will be exchanged
1382 * @message: describes the data transfers
1383 * Context: can sleep
1384 *
1385 * This call may only be used from a context that may sleep.  The sleep
1386 * is non-interruptible, and has no timeout.  Low-overhead controller
1387 * drivers may DMA directly into and out of the message buffers.
1388 *
1389 * This call should be used by drivers that require exclusive access to the
1390 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
1391 * be released by a spi_bus_unlock call when the exclusive access is over.
1392 *
1393 * It returns zero on success, else a negative error code.
1394 */
1395int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
1396{
1397	return __spi_sync(spi, message, 1);
1398}
1399EXPORT_SYMBOL_GPL(spi_sync_locked);
1400
1401/**
1402 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
1403 * @master: SPI bus master that should be locked for exclusive bus access
1404 * Context: can sleep
1405 *
1406 * This call may only be used from a context that may sleep.  The sleep
1407 * is non-interruptible, and has no timeout.
1408 *
1409 * This call should be used by drivers that require exclusive access to the
1410 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
1411 * exclusive access is over. Data transfer must be done by spi_sync_locked
1412 * and spi_async_locked calls when the SPI bus lock is held.
1413 *
1414 * It returns zero on success, else a negative error code.
1415 */
1416int spi_bus_lock(struct spi_master *master)
1417{
1418	unsigned long flags;
1419
1420	mutex_lock(&master->bus_lock_mutex);
1421
1422	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1423	master->bus_lock_flag = 1;
1424	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1425
1426	/* mutex remains locked until spi_bus_unlock is called */
1427
1428	return 0;
1429}
1430EXPORT_SYMBOL_GPL(spi_bus_lock);
1431
1432/**
1433 * spi_bus_unlock - release the lock for exclusive SPI bus usage
1434 * @master: SPI bus master that was locked for exclusive bus access
1435 * Context: can sleep
1436 *
1437 * This call may only be used from a context that may sleep.  The sleep
1438 * is non-interruptible, and has no timeout.
1439 *
1440 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
1441 * call.
1442 *
1443 * It returns zero on success, else a negative error code.
1444 */
1445int spi_bus_unlock(struct spi_master *master)
1446{
1447	master->bus_lock_flag = 0;
1448
1449	mutex_unlock(&master->bus_lock_mutex);
1450
1451	return 0;
1452}
1453EXPORT_SYMBOL_GPL(spi_bus_unlock);
1454
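/*
 * Usage sketch (hypothetical): run two messages back to back with no other
 * traffic on the bus in between, following the spi_bus_lock() /
 * spi_sync_locked() / spi_bus_unlock() rules above.
 */
static int example_atomic_sequence(struct spi_device *spi,
				   struct spi_message *first,
				   struct spi_message *second)
{
	struct spi_master *master = spi->master;
	int ret;

	spi_bus_lock(master);
	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);
	spi_bus_unlock(master);

	return ret;
}
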
1455/* portable code must never pass more than 32 bytes */
1456#define	SPI_BUFSIZ	max(32,SMP_CACHE_BYTES)
1457
1458static u8	*buf;
1459
1460/**
1461 * spi_write_then_read - SPI synchronous write followed by read
1462 * @spi: device with which data will be exchanged
1463 * @txbuf: data to be written (need not be dma-safe)
1464 * @n_tx: size of txbuf, in bytes
1465 * @rxbuf: buffer into which data will be read (need not be dma-safe)
1466 * @n_rx: size of rxbuf, in bytes
1467 * Context: can sleep
1468 *
1469 * This performs a half duplex MicroWire style transaction with the
1470 * device, sending txbuf and then reading rxbuf.  The return value
1471 * is zero for success, else a negative errno status code.
1472 * This call may only be used from a context that may sleep.
1473 *
1474 * Parameters to this routine are always copied using a small buffer;
1475 * portable code should never use this for more than 32 bytes.
1476 * Performance-sensitive or bulk transfer code should instead use
1477 * spi_{async,sync}() calls with dma-safe buffers.
1478 */
1479int spi_write_then_read(struct spi_device *spi,
1480		const void *txbuf, unsigned n_tx,
1481		void *rxbuf, unsigned n_rx)
1482{
1483	static DEFINE_MUTEX(lock);
1484
1485	int			status;
1486	struct spi_message	message;
1487	struct spi_transfer	x[2];
1488	u8			*local_buf;
1489
1490	/* Use preallocated DMA-safe buffer.  We can't avoid copying here,
1491	 * (as a pure convenience thing), but we can keep heap costs
1492	 * out of the hot path ...
1493	 */
1494	if ((n_tx + n_rx) > SPI_BUFSIZ)
1495		return -EINVAL;
1496
1497	spi_message_init(&message);
1498	memset(x, 0, sizeof x);
1499	if (n_tx) {
1500		x[0].len = n_tx;
1501		spi_message_add_tail(&x[0], &message);
1502	}
1503	if (n_rx) {
1504		x[1].len = n_rx;
1505		spi_message_add_tail(&x[1], &message);
1506	}
1507
1508	/* ... unless someone else is using the pre-allocated buffer */
1509	if (!mutex_trylock(&lock)) {
1510		local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
1511		if (!local_buf)
1512			return -ENOMEM;
1513	} else
1514		local_buf = buf;
1515
1516	memcpy(local_buf, txbuf, n_tx);
1517	x[0].tx_buf = local_buf;
1518	x[1].rx_buf = local_buf + n_tx;
1519
1520	/* do the i/o */
1521	status = spi_sync(spi, &message);
1522	if (status == 0)
1523		memcpy(rxbuf, x[1].rx_buf, n_rx);
1524
1525	if (x[0].tx_buf == buf)
1526		mutex_unlock(&lock);
1527	else
1528		kfree(local_buf);
1529
1530	return status;
1531}
1532EXPORT_SYMBOL_GPL(spi_write_then_read);
1533
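/*
 * Usage sketch (hypothetical register read): write a one-byte command, then
 * read back one byte.  Small on-stack buffers are fine here because
 * spi_write_then_read() copies through its own DMA-safe buffer.
 */
static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	return spi_write_then_read(spi, &reg, 1, val, 1);
}
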
1534/*-------------------------------------------------------------------------*/
1535
1536static int __init spi_init(void)
1537{
1538	int	status;
1539
1540	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
1541	if (!buf) {
1542		status = -ENOMEM;
1543		goto err0;
1544	}
1545
1546	status = bus_register(&spi_bus_type);
1547	if (status < 0)
1548		goto err1;
1549
1550	status = class_register(&spi_master_class);
1551	if (status < 0)
1552		goto err2;
1553	return 0;
1554
1555err2:
1556	bus_unregister(&spi_bus_type);
1557err1:
1558	kfree(buf);
1559	buf = NULL;
1560err0:
1561	return status;
1562}
1563
1564/* board_info is normally registered in arch_initcall(),
1565 * but even essential drivers wait till later
1566 *
1567 * REVISIT only boardinfo really needs static linking. the rest (device and
1568 * driver registration) _could_ be dynamically linked (modular) ... costs
1569 * include needing to have boardinfo data structures be much more public.
1570 */
1571postcore_initcall(spi_init);
1572
v4.10.11
   1/*
   2 * SPI init/core code
   3 *
   4 * Copyright (C) 2005 David Brownell
   5 * Copyright (C) 2008 Secret Lab Technologies Ltd.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 */
  17
  18#include <linux/kernel.h>
  19#include <linux/device.h>
  20#include <linux/init.h>
  21#include <linux/cache.h>
  22#include <linux/dma-mapping.h>
  23#include <linux/dmaengine.h>
  24#include <linux/mutex.h>
  25#include <linux/of_device.h>
  26#include <linux/of_irq.h>
  27#include <linux/clk/clk-conf.h>
  28#include <linux/slab.h>
  29#include <linux/mod_devicetable.h>
  30#include <linux/spi/spi.h>
  31#include <linux/of_gpio.h>
  32#include <linux/pm_runtime.h>
  33#include <linux/pm_domain.h>
  34#include <linux/export.h>
  35#include <linux/sched/rt.h>
  36#include <linux/delay.h>
  37#include <linux/kthread.h>
  38#include <linux/ioport.h>
  39#include <linux/acpi.h>
  40#include <linux/highmem.h>
  41
  42#define CREATE_TRACE_POINTS
  43#include <trace/events/spi.h>
  44
  45static void spidev_release(struct device *dev)
  46{
  47	struct spi_device	*spi = to_spi_device(dev);
  48
  49	/* spi masters may cleanup for released devices */
  50	if (spi->master->cleanup)
  51		spi->master->cleanup(spi);
  52
  53	spi_master_put(spi->master);
  54	kfree(spi);
  55}
  56
  57static ssize_t
  58modalias_show(struct device *dev, struct device_attribute *a, char *buf)
  59{
  60	const struct spi_device	*spi = to_spi_device(dev);
  61	int len;
  62
  63	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
  64	if (len != -ENODEV)
  65		return len;
  66
  67	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
  68}
  69static DEVICE_ATTR_RO(modalias);
  70
  71#define SPI_STATISTICS_ATTRS(field, file)				\
  72static ssize_t spi_master_##field##_show(struct device *dev,		\
  73					 struct device_attribute *attr,	\
  74					 char *buf)			\
  75{									\
  76	struct spi_master *master = container_of(dev,			\
  77						 struct spi_master, dev); \
  78	return spi_statistics_##field##_show(&master->statistics, buf);	\
  79}									\
  80static struct device_attribute dev_attr_spi_master_##field = {		\
  81	.attr = { .name = file, .mode = S_IRUGO },			\
  82	.show = spi_master_##field##_show,				\
  83};									\
  84static ssize_t spi_device_##field##_show(struct device *dev,		\
  85					 struct device_attribute *attr,	\
  86					char *buf)			\
  87{									\
  88	struct spi_device *spi = to_spi_device(dev);			\
  89	return spi_statistics_##field##_show(&spi->statistics, buf);	\
  90}									\
  91static struct device_attribute dev_attr_spi_device_##field = {		\
  92	.attr = { .name = file, .mode = S_IRUGO },			\
  93	.show = spi_device_##field##_show,				\
  94}
  95
  96#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
  97static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
  98					    char *buf)			\
  99{									\
 100	unsigned long flags;						\
 101	ssize_t len;							\
 102	spin_lock_irqsave(&stat->lock, flags);				\
 103	len = sprintf(buf, format_string, stat->field);			\
 104	spin_unlock_irqrestore(&stat->lock, flags);			\
 105	return len;							\
 106}									\
 107SPI_STATISTICS_ATTRS(name, file)
 108
 109#define SPI_STATISTICS_SHOW(field, format_string)			\
 110	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
 111				 field, format_string)
 112
 113SPI_STATISTICS_SHOW(messages, "%lu");
 114SPI_STATISTICS_SHOW(transfers, "%lu");
 115SPI_STATISTICS_SHOW(errors, "%lu");
 116SPI_STATISTICS_SHOW(timedout, "%lu");
 117
 118SPI_STATISTICS_SHOW(spi_sync, "%lu");
 119SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
 120SPI_STATISTICS_SHOW(spi_async, "%lu");
 121
 122SPI_STATISTICS_SHOW(bytes, "%llu");
 123SPI_STATISTICS_SHOW(bytes_rx, "%llu");
 124SPI_STATISTICS_SHOW(bytes_tx, "%llu");
 125
 126#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
 127	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
 128				 "transfer_bytes_histo_" number,	\
 129				 transfer_bytes_histo[index],  "%lu")
 130SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
 131SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
 132SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
 133SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
 134SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
 135SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
 136SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
 137SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
 138SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
 139SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
 140SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
 141SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
 142SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
 143SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
 144SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
 145SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
 146SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
 147
 148SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
 149
 150static struct attribute *spi_dev_attrs[] = {
 151	&dev_attr_modalias.attr,
 152	NULL,
 153};
 154
 155static const struct attribute_group spi_dev_group = {
 156	.attrs  = spi_dev_attrs,
 157};
 158
 159static struct attribute *spi_device_statistics_attrs[] = {
 160	&dev_attr_spi_device_messages.attr,
 161	&dev_attr_spi_device_transfers.attr,
 162	&dev_attr_spi_device_errors.attr,
 163	&dev_attr_spi_device_timedout.attr,
 164	&dev_attr_spi_device_spi_sync.attr,
 165	&dev_attr_spi_device_spi_sync_immediate.attr,
 166	&dev_attr_spi_device_spi_async.attr,
 167	&dev_attr_spi_device_bytes.attr,
 168	&dev_attr_spi_device_bytes_rx.attr,
 169	&dev_attr_spi_device_bytes_tx.attr,
 170	&dev_attr_spi_device_transfer_bytes_histo0.attr,
 171	&dev_attr_spi_device_transfer_bytes_histo1.attr,
 172	&dev_attr_spi_device_transfer_bytes_histo2.attr,
 173	&dev_attr_spi_device_transfer_bytes_histo3.attr,
 174	&dev_attr_spi_device_transfer_bytes_histo4.attr,
 175	&dev_attr_spi_device_transfer_bytes_histo5.attr,
 176	&dev_attr_spi_device_transfer_bytes_histo6.attr,
 177	&dev_attr_spi_device_transfer_bytes_histo7.attr,
 178	&dev_attr_spi_device_transfer_bytes_histo8.attr,
 179	&dev_attr_spi_device_transfer_bytes_histo9.attr,
 180	&dev_attr_spi_device_transfer_bytes_histo10.attr,
 181	&dev_attr_spi_device_transfer_bytes_histo11.attr,
 182	&dev_attr_spi_device_transfer_bytes_histo12.attr,
 183	&dev_attr_spi_device_transfer_bytes_histo13.attr,
 184	&dev_attr_spi_device_transfer_bytes_histo14.attr,
 185	&dev_attr_spi_device_transfer_bytes_histo15.attr,
 186	&dev_attr_spi_device_transfer_bytes_histo16.attr,
 187	&dev_attr_spi_device_transfers_split_maxsize.attr,
 188	NULL,
 189};
 190
 191static const struct attribute_group spi_device_statistics_group = {
 192	.name  = "statistics",
 193	.attrs  = spi_device_statistics_attrs,
 194};
 195
 196static const struct attribute_group *spi_dev_groups[] = {
 197	&spi_dev_group,
 198	&spi_device_statistics_group,
 199	NULL,
 200};
 201
 202static struct attribute *spi_master_statistics_attrs[] = {
 203	&dev_attr_spi_master_messages.attr,
 204	&dev_attr_spi_master_transfers.attr,
 205	&dev_attr_spi_master_errors.attr,
 206	&dev_attr_spi_master_timedout.attr,
 207	&dev_attr_spi_master_spi_sync.attr,
 208	&dev_attr_spi_master_spi_sync_immediate.attr,
 209	&dev_attr_spi_master_spi_async.attr,
 210	&dev_attr_spi_master_bytes.attr,
 211	&dev_attr_spi_master_bytes_rx.attr,
 212	&dev_attr_spi_master_bytes_tx.attr,
 213	&dev_attr_spi_master_transfer_bytes_histo0.attr,
 214	&dev_attr_spi_master_transfer_bytes_histo1.attr,
 215	&dev_attr_spi_master_transfer_bytes_histo2.attr,
 216	&dev_attr_spi_master_transfer_bytes_histo3.attr,
 217	&dev_attr_spi_master_transfer_bytes_histo4.attr,
 218	&dev_attr_spi_master_transfer_bytes_histo5.attr,
 219	&dev_attr_spi_master_transfer_bytes_histo6.attr,
 220	&dev_attr_spi_master_transfer_bytes_histo7.attr,
 221	&dev_attr_spi_master_transfer_bytes_histo8.attr,
 222	&dev_attr_spi_master_transfer_bytes_histo9.attr,
 223	&dev_attr_spi_master_transfer_bytes_histo10.attr,
 224	&dev_attr_spi_master_transfer_bytes_histo11.attr,
 225	&dev_attr_spi_master_transfer_bytes_histo12.attr,
 226	&dev_attr_spi_master_transfer_bytes_histo13.attr,
 227	&dev_attr_spi_master_transfer_bytes_histo14.attr,
 228	&dev_attr_spi_master_transfer_bytes_histo15.attr,
 229	&dev_attr_spi_master_transfer_bytes_histo16.attr,
 230	&dev_attr_spi_master_transfers_split_maxsize.attr,
 231	NULL,
 232};
 233
 234static const struct attribute_group spi_master_statistics_group = {
 235	.name  = "statistics",
 236	.attrs  = spi_master_statistics_attrs,
 237};
 238
 239static const struct attribute_group *spi_master_groups[] = {
 240	&spi_master_statistics_group,
 241	NULL,
 242};
 243
 244void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 245				       struct spi_transfer *xfer,
 246				       struct spi_master *master)
 247{
 248	unsigned long flags;
 249	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
 250
 251	if (l2len < 0)
 252		l2len = 0;
 253
 254	spin_lock_irqsave(&stats->lock, flags);
 255
 256	stats->transfers++;
 257	stats->transfer_bytes_histo[l2len]++;
 258
 259	stats->bytes += xfer->len;
 260	if ((xfer->tx_buf) &&
 261	    (xfer->tx_buf != master->dummy_tx))
 262		stats->bytes_tx += xfer->len;
 263	if ((xfer->rx_buf) &&
 264	    (xfer->rx_buf != master->dummy_rx))
 265		stats->bytes_rx += xfer->len;
 266
 267	spin_unlock_irqrestore(&stats->lock, flags);
 268}
 269EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
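/*
 * Worked example (editorial, not from the original source): fls(16) == 5, so
 * a 16-byte transfer gets l2len = 4 and is counted in the "16-31" histogram
 * bucket above; a zero-length transfer would yield l2len = -1, which the
 * clamp maps into the "0-1" bucket, and anything of 64 KiB or more saturates
 * in "65536+".
 */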
 270
 271/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 272 * and the sysfs version makes coldplug work too.
 273 */
 274
 275static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
 276						const struct spi_device *sdev)
 277{
 278	while (id->name[0]) {
 279		if (!strcmp(sdev->modalias, id->name))
 280			return id;
 281		id++;
 282	}
 283	return NULL;
 284}
 285
 286const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
 287{
 288	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
 289
 290	return spi_match_id(sdrv->id_table, sdev);
 291}
 292EXPORT_SYMBOL_GPL(spi_get_device_id);
 293
 294static int spi_match_device(struct device *dev, struct device_driver *drv)
 295{
 296	const struct spi_device	*spi = to_spi_device(dev);
 297	const struct spi_driver	*sdrv = to_spi_driver(drv);
 298
 299	/* Attempt an OF style match */
 300	if (of_driver_match_device(dev, drv))
 301		return 1;
 302
 303	/* Then try ACPI */
 304	if (acpi_driver_match_device(dev, drv))
 305		return 1;
 306
 307	if (sdrv->id_table)
 308		return !!spi_match_id(sdrv->id_table, spi);
 309
 310	return strcmp(spi->modalias, drv->name) == 0;
 311}
 312
 313static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
 314{
 315	const struct spi_device		*spi = to_spi_device(dev);
 316	int rc;
 317
 318	rc = acpi_device_uevent_modalias(dev, env);
 319	if (rc != -ENODEV)
 320		return rc;
 321
 322	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
 323	return 0;
 324}
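/*
 * Worked example (editorial): for a hypothetical device whose modalias is
 * "foo-chip", the uevent above emits MODALIAS=spi:foo-chip (SPI_MODULE_PREFIX
 * is "spi:"), so "modprobe spi:foo-chip" loads whichever driver lists
 * "foo-chip" in its id_table - the hotplug counterpart of the sysfs modalias
 * attribute.
 */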
  325
 326struct bus_type spi_bus_type = {
 327	.name		= "spi",
 328	.dev_groups	= spi_dev_groups,
 329	.match		= spi_match_device,
  330	.uevent		= spi_uevent,
 331};
 332EXPORT_SYMBOL_GPL(spi_bus_type);
 333
 334
 335static int spi_drv_probe(struct device *dev)
 336{
 337	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
 338	struct spi_device		*spi = to_spi_device(dev);
 339	int ret;
 340
 341	ret = of_clk_set_defaults(dev->of_node, false);
 342	if (ret)
 343		return ret;
 344
 345	if (dev->of_node) {
 346		spi->irq = of_irq_get(dev->of_node, 0);
 347		if (spi->irq == -EPROBE_DEFER)
 348			return -EPROBE_DEFER;
 349		if (spi->irq < 0)
 350			spi->irq = 0;
 351	}
 352
 353	ret = dev_pm_domain_attach(dev, true);
 354	if (ret != -EPROBE_DEFER) {
 355		ret = sdrv->probe(spi);
 356		if (ret)
 357			dev_pm_domain_detach(dev, true);
 358	}
 359
 360	return ret;
 361}
 362
 363static int spi_drv_remove(struct device *dev)
 364{
 365	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
 366	int ret;
 367
 368	ret = sdrv->remove(to_spi_device(dev));
 369	dev_pm_domain_detach(dev, true);
 370
 371	return ret;
 372}
 373
 374static void spi_drv_shutdown(struct device *dev)
 375{
 376	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
 377
 378	sdrv->shutdown(to_spi_device(dev));
 379}
 380
 381/**
 382 * __spi_register_driver - register a SPI driver
 383 * @owner: owner module of the driver to register
 384 * @sdrv: the driver to register
 385 * Context: can sleep
 386 *
 387 * Return: zero on success, else a negative error code.
 388 */
 389int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
 390{
 391	sdrv->driver.owner = owner;
 392	sdrv->driver.bus = &spi_bus_type;
 393	if (sdrv->probe)
 394		sdrv->driver.probe = spi_drv_probe;
 395	if (sdrv->remove)
 396		sdrv->driver.remove = spi_drv_remove;
 397	if (sdrv->shutdown)
 398		sdrv->driver.shutdown = spi_drv_shutdown;
 399	return driver_register(&sdrv->driver);
 400}
 401EXPORT_SYMBOL_GPL(__spi_register_driver);
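/*
 * Editorial sketch of the client-driver side of __spi_register_driver(); the
 * "foo" names are hypothetical and the bodies are deliberately minimal.
 */
static const struct spi_device_id foo_spi_ids[] = {
	{ "foo-chip", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, foo_spi_ids);

static int foo_spi_probe(struct spi_device *spi)
{
	/* spi->mode, spi->max_speed_hz etc. were filled in by the core */
	return 0;
}

static struct spi_driver foo_spi_driver = {
	.driver		= { .name = "foo" },
	.id_table	= foo_spi_ids,
	.probe		= foo_spi_probe,
};
module_spi_driver(foo_spi_driver);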
 402
 403/*-------------------------------------------------------------------------*/
 404
 405/* SPI devices should normally not be created by SPI device drivers; that
 406 * would make them board-specific.  Similarly with SPI master drivers.
  407 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 408 * with other readonly (flashable) information about mainboard devices.
 409 */
 410
 411struct boardinfo {
 412	struct list_head	list;
 413	struct spi_board_info	board_info;
 414};
 415
 416static LIST_HEAD(board_list);
 417static LIST_HEAD(spi_master_list);
 418
 419/*
  420 * Used to protect add/del operations for the board_info list and
  421 * spi_master list, and their matching process.
 422 */
 423static DEFINE_MUTEX(board_lock);
 424
 425/**
 426 * spi_alloc_device - Allocate a new SPI device
 427 * @master: Controller to which device is connected
 428 * Context: can sleep
 429 *
 430 * Allows a driver to allocate and initialize a spi_device without
 431 * registering it immediately.  This allows a driver to directly
 432 * fill the spi_device with device parameters before calling
 433 * spi_add_device() on it.
 434 *
  435 * Caller is responsible for calling spi_add_device() on the returned
 436 * spi_device structure to add it to the SPI master.  If the caller
 437 * needs to discard the spi_device without adding it, then it should
 438 * call spi_dev_put() on it.
 439 *
 440 * Return: a pointer to the new device, or NULL.
 441 */
 442struct spi_device *spi_alloc_device(struct spi_master *master)
 443{
 444	struct spi_device	*spi;
 
 445
 446	if (!spi_master_get(master))
 447		return NULL;
 448
 449	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
 450	if (!spi) {
 
 451		spi_master_put(master);
 452		return NULL;
 453	}
 454
 455	spi->master = master;
 456	spi->dev.parent = &master->dev;
 457	spi->dev.bus = &spi_bus_type;
 458	spi->dev.release = spidev_release;
 459	spi->cs_gpio = -ENOENT;
 460
 461	spin_lock_init(&spi->statistics.lock);
 462
 463	device_initialize(&spi->dev);
 464	return spi;
 465}
 466EXPORT_SYMBOL_GPL(spi_alloc_device);
 467
 468static void spi_dev_set_name(struct spi_device *spi)
 469{
 470	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
 471
 472	if (adev) {
 473		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
 474		return;
 475	}
 476
 477	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
 478		     spi->chip_select);
 479}
 480
 481static int spi_dev_check(struct device *dev, void *data)
 482{
 483	struct spi_device *spi = to_spi_device(dev);
 484	struct spi_device *new_spi = data;
 485
 486	if (spi->master == new_spi->master &&
 487	    spi->chip_select == new_spi->chip_select)
 488		return -EBUSY;
 489	return 0;
 490}
 491
 492/**
 493 * spi_add_device - Add spi_device allocated with spi_alloc_device
 494 * @spi: spi_device to register
 495 *
 496 * Companion function to spi_alloc_device.  Devices allocated with
 497 * spi_alloc_device can be added onto the spi bus with this function.
 498 *
 499 * Return: 0 on success; negative errno on failure
 500 */
 501int spi_add_device(struct spi_device *spi)
 502{
 503	static DEFINE_MUTEX(spi_add_lock);
 504	struct spi_master *master = spi->master;
 505	struct device *dev = master->dev.parent;
 506	int status;
 507
 508	/* Chipselects are numbered 0..max; validate. */
 509	if (spi->chip_select >= master->num_chipselect) {
 510		dev_err(dev, "cs%d >= max %d\n",
 511			spi->chip_select,
 512			master->num_chipselect);
 513		return -EINVAL;
 514	}
 515
 516	/* Set the bus ID string */
  517	spi_dev_set_name(spi);
 518
 519	/* We need to make sure there's no other device with this
 520	 * chipselect **BEFORE** we call setup(), else we'll trash
 521	 * its configuration.  Lock against concurrent add() calls.
 522	 */
 523	mutex_lock(&spi_add_lock);
 524
 525	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
 526	if (status) {
 527		dev_err(dev, "chipselect %d already in use\n",
  528				spi->chip_select);
 529		goto done;
 530	}
 531
 532	if (master->cs_gpios)
 533		spi->cs_gpio = master->cs_gpios[spi->chip_select];
 534
 535	/* Drivers may modify this initial i/o setup, but will
 536	 * normally rely on the device being setup.  Devices
 537	 * using SPI_CS_HIGH can't coexist well otherwise...
 538	 */
 539	status = spi_setup(spi);
 540	if (status < 0) {
 541		dev_err(dev, "can't setup %s, status %d\n",
 542				dev_name(&spi->dev), status);
 543		goto done;
 544	}
 545
 546	/* Device may be bound to an active driver when this returns */
 547	status = device_add(&spi->dev);
 548	if (status < 0)
 549		dev_err(dev, "can't add %s, status %d\n",
 550				dev_name(&spi->dev), status);
 551	else
 552		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
 553
 554done:
 555	mutex_unlock(&spi_add_lock);
 556	return status;
 557}
 558EXPORT_SYMBOL_GPL(spi_add_device);
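/*
 * Editorial usage sketch of the two-step allocate/add pattern documented
 * above; "foo-chip" and the numbers are placeholders.
 */
static struct spi_device *example_add_chip(struct spi_master *master)
{
	struct spi_device *spi;

	spi = spi_alloc_device(master);
	if (!spi)
		return NULL;

	strlcpy(spi->modalias, "foo-chip", sizeof(spi->modalias));
	spi->chip_select = 0;
	spi->max_speed_hz = 1000000;

	if (spi_add_device(spi)) {
		spi_dev_put(spi);	/* never added: drop the reference */
		return NULL;
	}
	return spi;
}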
 559
 560/**
 561 * spi_new_device - instantiate one new SPI device
 562 * @master: Controller to which device is connected
 563 * @chip: Describes the SPI device
 564 * Context: can sleep
 565 *
 566 * On typical mainboards, this is purely internal; and it's not needed
 567 * after board init creates the hard-wired devices.  Some development
 568 * platforms may not be able to use spi_register_board_info though, and
 569 * this is exported so that for example a USB or parport based adapter
 570 * driver could add devices (which it would learn about out-of-band).
 571 *
 572 * Return: the new device, or NULL.
 573 */
 574struct spi_device *spi_new_device(struct spi_master *master,
 575				  struct spi_board_info *chip)
 576{
 577	struct spi_device	*proxy;
 578	int			status;
 579
 580	/* NOTE:  caller did any chip->bus_num checks necessary.
 581	 *
 582	 * Also, unless we change the return value convention to use
 583	 * error-or-pointer (not NULL-or-pointer), troubleshootability
 584	 * suggests syslogged diagnostics are best here (ugh).
 585	 */
 586
 587	proxy = spi_alloc_device(master);
 588	if (!proxy)
 589		return NULL;
 590
 591	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
 592
 593	proxy->chip_select = chip->chip_select;
 594	proxy->max_speed_hz = chip->max_speed_hz;
 595	proxy->mode = chip->mode;
 596	proxy->irq = chip->irq;
 597	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
 598	proxy->dev.platform_data = (void *) chip->platform_data;
 599	proxy->controller_data = chip->controller_data;
 600	proxy->controller_state = NULL;
 601
 602	status = spi_add_device(proxy);
 603	if (status < 0) {
 604		spi_dev_put(proxy);
 605		return NULL;
 606	}
 607
 608	return proxy;
 609}
 610EXPORT_SYMBOL_GPL(spi_new_device);
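/*
 * Editorial sketch of the adapter-driver case mentioned above: describe one
 * chip at run time and instantiate it with spi_new_device().  Values are
 * placeholders; the board_info may live on the stack because its fields are
 * copied into the new device.
 */
static struct spi_device *example_attach_adc(struct spi_master *master)
{
	struct spi_board_info chip = {
		.modalias	= "foo-adc",
		.max_speed_hz	= 2000000,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	};

	return spi_new_device(master, &chip);	/* NULL on failure */
}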
 611
 612/**
 613 * spi_unregister_device - unregister a single SPI device
 614 * @spi: spi_device to unregister
 615 *
 616 * Start making the passed SPI device vanish. Normally this would be handled
 617 * by spi_unregister_master().
 618 */
 619void spi_unregister_device(struct spi_device *spi)
 620{
 621	if (!spi)
 622		return;
 623
 624	if (spi->dev.of_node)
 625		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
 626	if (ACPI_COMPANION(&spi->dev))
 627		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
 628	device_unregister(&spi->dev);
 629}
 630EXPORT_SYMBOL_GPL(spi_unregister_device);
 631
 632static void spi_match_master_to_boardinfo(struct spi_master *master,
 633				struct spi_board_info *bi)
 634{
 635	struct spi_device *dev;
 636
 637	if (master->bus_num != bi->bus_num)
 638		return;
 639
 640	dev = spi_new_device(master, bi);
 641	if (!dev)
 642		dev_err(master->dev.parent, "can't create new device for %s\n",
 643			bi->modalias);
 644}
 645
 646/**
 647 * spi_register_board_info - register SPI devices for a given board
 648 * @info: array of chip descriptors
 649 * @n: how many descriptors are provided
 650 * Context: can sleep
 651 *
 652 * Board-specific early init code calls this (probably during arch_initcall)
 653 * with segments of the SPI device table.  Any device nodes are created later,
 654 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 655 * this table of devices forever, so that reloading a controller driver will
 656 * not make Linux forget about these hard-wired devices.
 657 *
 658 * Other code can also call this, e.g. a particular add-on board might provide
 659 * SPI devices through its expansion connector, so code initializing that board
 660 * would naturally declare its SPI devices.
 661 *
 662 * The board info passed can safely be __initdata ... but be careful of
 663 * any embedded pointers (platform_data, etc), they're copied as-is.
 664 *
 665 * Return: zero on success, else a negative error code.
 666 */
  667int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 668{
 669	struct boardinfo *bi;
 670	int i;
 671
 672	if (!n)
 673		return -EINVAL;
 674
 675	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
 676	if (!bi)
 677		return -ENOMEM;
 678
 679	for (i = 0; i < n; i++, bi++, info++) {
 680		struct spi_master *master;
 681
 682		memcpy(&bi->board_info, info, sizeof(*info));
 683		mutex_lock(&board_lock);
 684		list_add_tail(&bi->list, &board_list);
 685		list_for_each_entry(master, &spi_master_list, list)
 686			spi_match_master_to_boardinfo(master, &bi->board_info);
 687		mutex_unlock(&board_lock);
 688	}
 689
 690	return 0;
 691}
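/*
 * Editorial sketch of the board-init usage described above: a hypothetical
 * __initdata table handed to spi_register_board_info() from an arch_initcall.
 */
static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	= "foo-sensor",
		.max_speed_hz	= 500000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_3,
	},
};

static int __init example_board_init(void)
{
	return spi_register_board_info(example_board_info,
				       ARRAY_SIZE(example_board_info));
}
/* arch_initcall(example_board_init); */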
 692
 693/*-------------------------------------------------------------------------*/
 694
 695static void spi_set_cs(struct spi_device *spi, bool enable)
 696{
 697	if (spi->mode & SPI_CS_HIGH)
 698		enable = !enable;
 699
 700	if (gpio_is_valid(spi->cs_gpio)) {
 701		gpio_set_value(spi->cs_gpio, !enable);
 702		/* Some SPI masters need both GPIO CS & slave_select */
 703		if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
 704		    spi->master->set_cs)
 705			spi->master->set_cs(spi, !enable);
 706	} else if (spi->master->set_cs) {
 707		spi->master->set_cs(spi, !enable);
 708	}
 709}
 710
 711#ifdef CONFIG_HAS_DMA
 712static int spi_map_buf(struct spi_master *master, struct device *dev,
 713		       struct sg_table *sgt, void *buf, size_t len,
 714		       enum dma_data_direction dir)
 715{
 716	const bool vmalloced_buf = is_vmalloc_addr(buf);
 717	unsigned int max_seg_size = dma_get_max_seg_size(dev);
 718#ifdef CONFIG_HIGHMEM
 719	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
 720				(unsigned long)buf < (PKMAP_BASE +
 721					(LAST_PKMAP * PAGE_SIZE)));
 722#else
 723	const bool kmap_buf = false;
 724#endif
 725	int desc_len;
 726	int sgs;
 727	struct page *vm_page;
 728	struct scatterlist *sg;
 729	void *sg_buf;
 730	size_t min;
 731	int i, ret;
 732
 733	if (vmalloced_buf || kmap_buf) {
 734		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
 735		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
 736	} else if (virt_addr_valid(buf)) {
 737		desc_len = min_t(int, max_seg_size, master->max_dma_len);
 738		sgs = DIV_ROUND_UP(len, desc_len);
 739	} else {
 740		return -EINVAL;
 741	}
 742
 743	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
 744	if (ret != 0)
 745		return ret;
 746
 747	sg = &sgt->sgl[0];
 748	for (i = 0; i < sgs; i++) {
 749
 750		if (vmalloced_buf || kmap_buf) {
 751			min = min_t(size_t,
 752				    len, desc_len - offset_in_page(buf));
 753			if (vmalloced_buf)
 754				vm_page = vmalloc_to_page(buf);
 755			else
 756				vm_page = kmap_to_page(buf);
 757			if (!vm_page) {
 758				sg_free_table(sgt);
 759				return -ENOMEM;
 760			}
 761			sg_set_page(sg, vm_page,
 762				    min, offset_in_page(buf));
 763		} else {
 764			min = min_t(size_t, len, desc_len);
 765			sg_buf = buf;
 766			sg_set_buf(sg, sg_buf, min);
 767		}
 768
 769		buf += min;
 770		len -= min;
 771		sg = sg_next(sg);
 772	}
 773
 774	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
 775	if (!ret)
 776		ret = -ENOMEM;
 777	if (ret < 0) {
 778		sg_free_table(sgt);
 779		return ret;
 780	}
 781
 782	sgt->nents = ret;
 783
 784	return 0;
 785}
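/*
 * Worked example (editorial) of the scatterlist sizing above, assuming 4 KiB
 * pages and a max_seg_size of at least PAGE_SIZE: a vmalloc'ed buffer of
 * 10000 bytes starting 100 bytes into a page needs
 * DIV_ROUND_UP(10000 + 100, 4096) = 3 entries, filled with 3996, 4096 and
 * 1908 bytes respectively.
 */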
 786
 787static void spi_unmap_buf(struct spi_master *master, struct device *dev,
 788			  struct sg_table *sgt, enum dma_data_direction dir)
 789{
 790	if (sgt->orig_nents) {
 791		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
 792		sg_free_table(sgt);
 793	}
 794}
 795
 796static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
 797{
 798	struct device *tx_dev, *rx_dev;
 799	struct spi_transfer *xfer;
 800	int ret;
 801
 802	if (!master->can_dma)
 803		return 0;
 804
 805	if (master->dma_tx)
 806		tx_dev = master->dma_tx->device->dev;
 807	else
 808		tx_dev = &master->dev;
 809
 810	if (master->dma_rx)
 811		rx_dev = master->dma_rx->device->dev;
 812	else
 813		rx_dev = &master->dev;
 814
 815	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 816		if (!master->can_dma(master, msg->spi, xfer))
 817			continue;
 818
 819		if (xfer->tx_buf != NULL) {
 820			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
 821					  (void *)xfer->tx_buf, xfer->len,
 822					  DMA_TO_DEVICE);
 823			if (ret != 0)
 824				return ret;
 825		}
 826
 827		if (xfer->rx_buf != NULL) {
 828			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
 829					  xfer->rx_buf, xfer->len,
 830					  DMA_FROM_DEVICE);
 831			if (ret != 0) {
 832				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
 833					      DMA_TO_DEVICE);
 834				return ret;
 835			}
 836		}
 837	}
 838
 839	master->cur_msg_mapped = true;
 840
 841	return 0;
 842}
 843
 844static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 845{
 846	struct spi_transfer *xfer;
 847	struct device *tx_dev, *rx_dev;
 848
 849	if (!master->cur_msg_mapped || !master->can_dma)
 850		return 0;
 851
 852	if (master->dma_tx)
 853		tx_dev = master->dma_tx->device->dev;
 854	else
 855		tx_dev = &master->dev;
 856
 857	if (master->dma_rx)
 858		rx_dev = master->dma_rx->device->dev;
 859	else
 860		rx_dev = &master->dev;
 861
 862	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 863		if (!master->can_dma(master, msg->spi, xfer))
 864			continue;
 865
 866		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 867		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 868	}
 869
 870	return 0;
 871}
 872#else /* !CONFIG_HAS_DMA */
 873static inline int spi_map_buf(struct spi_master *master,
 874			      struct device *dev, struct sg_table *sgt,
 875			      void *buf, size_t len,
 876			      enum dma_data_direction dir)
 877{
 878	return -EINVAL;
 879}
 880
 881static inline void spi_unmap_buf(struct spi_master *master,
 882				 struct device *dev, struct sg_table *sgt,
 883				 enum dma_data_direction dir)
 884{
 885}
 886
 887static inline int __spi_map_msg(struct spi_master *master,
 888				struct spi_message *msg)
 889{
 890	return 0;
 891}
 892
 893static inline int __spi_unmap_msg(struct spi_master *master,
 894				  struct spi_message *msg)
 895{
 896	return 0;
 897}
 898#endif /* !CONFIG_HAS_DMA */
 899
 900static inline int spi_unmap_msg(struct spi_master *master,
 901				struct spi_message *msg)
 902{
 903	struct spi_transfer *xfer;
 904
 905	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 906		/*
  907		 * Transfers whose tx_buf or rx_buf was originally NULL had them
  908		 * pointed at the dummy buffers; reset those back to NULL here.
 909		 */
 910		if (xfer->tx_buf == master->dummy_tx)
 911			xfer->tx_buf = NULL;
 912		if (xfer->rx_buf == master->dummy_rx)
 913			xfer->rx_buf = NULL;
 914	}
 915
 916	return __spi_unmap_msg(master, msg);
 917}
 918
 919static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
 920{
 921	struct spi_transfer *xfer;
 922	void *tmp;
 923	unsigned int max_tx, max_rx;
 924
 925	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
 926		max_tx = 0;
 927		max_rx = 0;
 928
 929		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 930			if ((master->flags & SPI_MASTER_MUST_TX) &&
 931			    !xfer->tx_buf)
 932				max_tx = max(xfer->len, max_tx);
 933			if ((master->flags & SPI_MASTER_MUST_RX) &&
 934			    !xfer->rx_buf)
 935				max_rx = max(xfer->len, max_rx);
 936		}
 937
 938		if (max_tx) {
 939			tmp = krealloc(master->dummy_tx, max_tx,
 940				       GFP_KERNEL | GFP_DMA);
 941			if (!tmp)
 942				return -ENOMEM;
 943			master->dummy_tx = tmp;
 944			memset(tmp, 0, max_tx);
 945		}
 946
 947		if (max_rx) {
 948			tmp = krealloc(master->dummy_rx, max_rx,
 949				       GFP_KERNEL | GFP_DMA);
 950			if (!tmp)
 951				return -ENOMEM;
 952			master->dummy_rx = tmp;
 953		}
 954
 955		if (max_tx || max_rx) {
 956			list_for_each_entry(xfer, &msg->transfers,
 957					    transfer_list) {
 958				if (!xfer->tx_buf)
 959					xfer->tx_buf = master->dummy_tx;
 960				if (!xfer->rx_buf)
 961					xfer->rx_buf = master->dummy_rx;
 962			}
 963		}
 964	}
 965
 966	return __spi_map_msg(master, msg);
 967}
 968
 969/*
 970 * spi_transfer_one_message - Default implementation of transfer_one_message()
 971 *
 972 * This is a standard implementation of transfer_one_message() for
 973 * drivers which implement a transfer_one() operation.  It provides
 974 * standard handling of delays and chip select management.
 975 */
 976static int spi_transfer_one_message(struct spi_master *master,
 977				    struct spi_message *msg)
 978{
 979	struct spi_transfer *xfer;
 980	bool keep_cs = false;
 981	int ret = 0;
 982	unsigned long long ms = 1;
 983	struct spi_statistics *statm = &master->statistics;
 984	struct spi_statistics *stats = &msg->spi->statistics;
 985
 986	spi_set_cs(msg->spi, true);
 987
 988	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
 989	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
 990
 991	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 992		trace_spi_transfer_start(msg, xfer);
 993
 994		spi_statistics_add_transfer_stats(statm, xfer, master);
 995		spi_statistics_add_transfer_stats(stats, xfer, master);
 996
 997		if (xfer->tx_buf || xfer->rx_buf) {
 998			reinit_completion(&master->xfer_completion);
 999
1000			ret = master->transfer_one(master, msg->spi, xfer);
1001			if (ret < 0) {
1002				SPI_STATISTICS_INCREMENT_FIELD(statm,
1003							       errors);
1004				SPI_STATISTICS_INCREMENT_FIELD(stats,
1005							       errors);
1006				dev_err(&msg->spi->dev,
1007					"SPI transfer failed: %d\n", ret);
1008				goto out;
1009			}
1010
1011			if (ret > 0) {
1012				ret = 0;
1013				ms = 8LL * 1000LL * xfer->len;
1014				do_div(ms, xfer->speed_hz);
1015				ms += ms + 100; /* some tolerance */
1016
1017				if (ms > UINT_MAX)
1018					ms = UINT_MAX;
1019
1020				ms = wait_for_completion_timeout(&master->xfer_completion,
1021								 msecs_to_jiffies(ms));
1022			}
1023
1024			if (ms == 0) {
1025				SPI_STATISTICS_INCREMENT_FIELD(statm,
1026							       timedout);
1027				SPI_STATISTICS_INCREMENT_FIELD(stats,
1028							       timedout);
1029				dev_err(&msg->spi->dev,
1030					"SPI transfer timed out\n");
1031				msg->status = -ETIMEDOUT;
1032			}
1033		} else {
1034			if (xfer->len)
1035				dev_err(&msg->spi->dev,
1036					"Bufferless transfer has length %u\n",
1037					xfer->len);
1038		}
1039
1040		trace_spi_transfer_stop(msg, xfer);
1041
1042		if (msg->status != -EINPROGRESS)
1043			goto out;
1044
1045		if (xfer->delay_usecs) {
1046			u16 us = xfer->delay_usecs;
1047
1048			if (us <= 10)
1049				udelay(us);
1050			else
1051				usleep_range(us, us + DIV_ROUND_UP(us, 10));
1052		}
1053
1054		if (xfer->cs_change) {
1055			if (list_is_last(&xfer->transfer_list,
1056					 &msg->transfers)) {
1057				keep_cs = true;
1058			} else {
1059				spi_set_cs(msg->spi, false);
1060				udelay(10);
1061				spi_set_cs(msg->spi, true);
1062			}
1063		}
1064
1065		msg->actual_length += xfer->len;
1066	}
1067
1068out:
1069	if (ret != 0 || !keep_cs)
1070		spi_set_cs(msg->spi, false);
1071
1072	if (msg->status == -EINPROGRESS)
1073		msg->status = ret;
1074
1075	if (msg->status && master->handle_err)
1076		master->handle_err(master, msg);
1077
1078	spi_res_release(master, msg);
1079
1080	spi_finalize_current_message(master);
1081
1082	return ret;
1083}
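/*
 * Worked example (editorial) of the timeout computed above: a 4096-byte
 * transfer at 1 MHz takes 8 * 1000 * 4096 / 1000000 = 32 ms (truncated by
 * do_div), and "ms += ms + 100" turns that into a 164 ms completion timeout.
 */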
1084
1085/**
1086 * spi_finalize_current_transfer - report completion of a transfer
1087 * @master: the master reporting completion
1088 *
1089 * Called by SPI drivers using the core transfer_one_message()
1090 * implementation to notify it that the current interrupt driven
1091 * transfer has finished and the next one may be scheduled.
1092 */
1093void spi_finalize_current_transfer(struct spi_master *master)
1094{
1095	complete(&master->xfer_completion);
1096}
1097EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
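/*
 * Editorial sketch of that contract: a controller's transfer_one() starts the
 * hardware and returns a positive value, and its completion interrupt later
 * calls spi_finalize_current_transfer().  All "example_*" names are
 * hypothetical and <linux/interrupt.h> is assumed.
 */
static int example_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	/* program the FIFO/DMA from xfer->tx_buf / xfer->rx_buf here */
	return 1;	/* > 0: in flight, the core waits on xfer_completion */
}

static irqreturn_t example_done_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;

	spi_finalize_current_transfer(master);
	return IRQ_HANDLED;
}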
1098
1099/**
1100 * __spi_pump_messages - function which processes spi message queue
1101 * @master: master to process queue for
1102 * @in_kthread: true if we are in the context of the message pump thread
1103 *
1104 * This function checks if there is any spi message in the queue that
 1105 * needs processing and if so calls out to the driver to initialize hardware
1106 * and transfer each message.
1107 *
1108 * Note that it is called both from the kthread itself and also from
1109 * inside spi_sync(); the queue extraction handling at the top of the
1110 * function should deal with this safely.
1111 */
1112static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 1113{
1114	unsigned long flags;
1115	bool was_busy = false;
1116	int ret;
1117
1118	/* Lock queue */
1119	spin_lock_irqsave(&master->queue_lock, flags);
1120
1121	/* Make sure we are not already running a message */
1122	if (master->cur_msg) {
1123		spin_unlock_irqrestore(&master->queue_lock, flags);
1124		return;
1125	}
1126
1127	/* If another context is idling the device then defer */
1128	if (master->idling) {
1129		kthread_queue_work(&master->kworker, &master->pump_messages);
1130		spin_unlock_irqrestore(&master->queue_lock, flags);
1131		return;
1132	}
1133
1134	/* Check if the queue is idle */
1135	if (list_empty(&master->queue) || !master->running) {
1136		if (!master->busy) {
1137			spin_unlock_irqrestore(&master->queue_lock, flags);
1138			return;
1139		}
1140
1141		/* Only do teardown in the thread */
1142		if (!in_kthread) {
1143			kthread_queue_work(&master->kworker,
1144					   &master->pump_messages);
1145			spin_unlock_irqrestore(&master->queue_lock, flags);
1146			return;
1147		}
1148
1149		master->busy = false;
1150		master->idling = true;
 1151	spin_unlock_irqrestore(&master->queue_lock, flags);
1152
1153		kfree(master->dummy_rx);
1154		master->dummy_rx = NULL;
1155		kfree(master->dummy_tx);
1156		master->dummy_tx = NULL;
1157		if (master->unprepare_transfer_hardware &&
1158		    master->unprepare_transfer_hardware(master))
1159			dev_err(&master->dev,
1160				"failed to unprepare transfer hardware\n");
1161		if (master->auto_runtime_pm) {
1162			pm_runtime_mark_last_busy(master->dev.parent);
1163			pm_runtime_put_autosuspend(master->dev.parent);
1164		}
1165		trace_spi_master_idle(master);
1166
1167		spin_lock_irqsave(&master->queue_lock, flags);
1168		master->idling = false;
1169		spin_unlock_irqrestore(&master->queue_lock, flags);
1170		return;
1171	}
1172
1173	/* Extract head of queue */
1174	master->cur_msg =
1175		list_first_entry(&master->queue, struct spi_message, queue);
1176
1177	list_del_init(&master->cur_msg->queue);
1178	if (master->busy)
1179		was_busy = true;
1180	else
1181		master->busy = true;
1182	spin_unlock_irqrestore(&master->queue_lock, flags);
1183
1184	mutex_lock(&master->io_mutex);
1185
1186	if (!was_busy && master->auto_runtime_pm) {
1187		ret = pm_runtime_get_sync(master->dev.parent);
1188		if (ret < 0) {
1189			dev_err(&master->dev, "Failed to power device: %d\n",
1190				ret);
1191			mutex_unlock(&master->io_mutex);
1192			return;
1193		}
1194	}
1195
1196	if (!was_busy)
1197		trace_spi_master_busy(master);
1198
1199	if (!was_busy && master->prepare_transfer_hardware) {
1200		ret = master->prepare_transfer_hardware(master);
1201		if (ret) {
1202			dev_err(&master->dev,
1203				"failed to prepare transfer hardware\n");
1204
1205			if (master->auto_runtime_pm)
1206				pm_runtime_put(master->dev.parent);
1207			mutex_unlock(&master->io_mutex);
1208			return;
1209		}
1210	}
1211
1212	trace_spi_message_start(master->cur_msg);
1213
1214	if (master->prepare_message) {
1215		ret = master->prepare_message(master, master->cur_msg);
1216		if (ret) {
1217			dev_err(&master->dev,
1218				"failed to prepare message: %d\n", ret);
1219			master->cur_msg->status = ret;
1220			spi_finalize_current_message(master);
1221			goto out;
1222		}
1223		master->cur_msg_prepared = true;
1224	}
1225
1226	ret = spi_map_msg(master, master->cur_msg);
1227	if (ret) {
1228		master->cur_msg->status = ret;
1229		spi_finalize_current_message(master);
1230		goto out;
1231	}
1232
1233	ret = master->transfer_one_message(master, master->cur_msg);
1234	if (ret) {
1235		dev_err(&master->dev,
1236			"failed to transfer one message from queue\n");
1237		goto out;
1238	}
1239
1240out:
1241	mutex_unlock(&master->io_mutex);
1242
1243	/* Prod the scheduler in case transfer_one() was busy waiting */
1244	if (!ret)
1245		cond_resched();
1246}
1247
1248/**
1249 * spi_pump_messages - kthread work function which processes spi message queue
1250 * @work: pointer to kthread work struct contained in the master struct
1251 */
1252static void spi_pump_messages(struct kthread_work *work)
1253{
1254	struct spi_master *master =
1255		container_of(work, struct spi_master, pump_messages);
1256
1257	__spi_pump_messages(master, true);
1258}
1259
1260static int spi_init_queue(struct spi_master *master)
1261{
1262	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 1263
1264	master->running = false;
1265	master->busy = false;
1266
1267	kthread_init_worker(&master->kworker);
1268	master->kworker_task = kthread_run(kthread_worker_fn,
1269					   &master->kworker, "%s",
1270					   dev_name(&master->dev));
1271	if (IS_ERR(master->kworker_task)) {
1272		dev_err(&master->dev, "failed to create message pump task\n");
1273		return PTR_ERR(master->kworker_task);
1274	}
1275	kthread_init_work(&master->pump_messages, spi_pump_messages);
1276
1277	/*
1278	 * Master config will indicate if this controller should run the
1279	 * message pump with high (realtime) priority to reduce the transfer
1280	 * latency on the bus by minimising the delay between a transfer
1281	 * request and the scheduling of the message pump thread. Without this
1282	 * setting the message pump thread will remain at default priority.
1283	 */
1284	if (master->rt) {
1285		dev_info(&master->dev,
1286			"will run message pump with realtime priority\n");
1287		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
1288	}
1289
1290	return 0;
1291}
1292
1293/**
1294 * spi_get_next_queued_message() - called by driver to check for queued
1295 * messages
1296 * @master: the master to check for queued messages
1297 *
1298 * If there are more messages in the queue, the next message is returned from
1299 * this call.
1300 *
1301 * Return: the next message in the queue, else NULL if the queue is empty.
1302 */
1303struct spi_message *spi_get_next_queued_message(struct spi_master *master)
1304{
1305	struct spi_message *next;
1306	unsigned long flags;
1307
1308	/* get a pointer to the next message, if any */
1309	spin_lock_irqsave(&master->queue_lock, flags);
1310	next = list_first_entry_or_null(&master->queue, struct spi_message,
 1311					queue);
1312	spin_unlock_irqrestore(&master->queue_lock, flags);
1313
1314	return next;
1315}
1316EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1317
1318/**
1319 * spi_finalize_current_message() - the current message is complete
1320 * @master: the master to return the message to
1321 *
1322 * Called by the driver to notify the core that the message in the front of the
1323 * queue is complete and can be removed from the queue.
1324 */
1325void spi_finalize_current_message(struct spi_master *master)
1326{
1327	struct spi_message *mesg;
1328	unsigned long flags;
1329	int ret;
1330
1331	spin_lock_irqsave(&master->queue_lock, flags);
1332	mesg = master->cur_msg;
1333	spin_unlock_irqrestore(&master->queue_lock, flags);
1334
1335	spi_unmap_msg(master, mesg);
1336
1337	if (master->cur_msg_prepared && master->unprepare_message) {
1338		ret = master->unprepare_message(master, mesg);
1339		if (ret) {
1340			dev_err(&master->dev,
1341				"failed to unprepare message: %d\n", ret);
1342		}
1343	}
1344
1345	spin_lock_irqsave(&master->queue_lock, flags);
1346	master->cur_msg = NULL;
1347	master->cur_msg_prepared = false;
1348	kthread_queue_work(&master->kworker, &master->pump_messages);
1349	spin_unlock_irqrestore(&master->queue_lock, flags);
1350
1351	trace_spi_message_done(mesg);
1352
1353	mesg->state = NULL;
1354	if (mesg->complete)
1355		mesg->complete(mesg->context);
1356}
1357EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1358
1359static int spi_start_queue(struct spi_master *master)
1360{
1361	unsigned long flags;
1362
1363	spin_lock_irqsave(&master->queue_lock, flags);
1364
1365	if (master->running || master->busy) {
1366		spin_unlock_irqrestore(&master->queue_lock, flags);
1367		return -EBUSY;
1368	}
1369
1370	master->running = true;
1371	master->cur_msg = NULL;
1372	spin_unlock_irqrestore(&master->queue_lock, flags);
1373
1374	kthread_queue_work(&master->kworker, &master->pump_messages);
1375
1376	return 0;
1377}
1378
1379static int spi_stop_queue(struct spi_master *master)
1380{
1381	unsigned long flags;
1382	unsigned limit = 500;
1383	int ret = 0;
1384
1385	spin_lock_irqsave(&master->queue_lock, flags);
1386
1387	/*
1388	 * This is a bit lame, but is optimized for the common execution path.
1389	 * A wait_queue on the master->busy could be used, but then the common
1390	 * execution path (pump_messages) would be required to call wake_up or
1391	 * friends on every SPI message. Do this instead.
1392	 */
1393	while ((!list_empty(&master->queue) || master->busy) && limit--) {
1394		spin_unlock_irqrestore(&master->queue_lock, flags);
1395		usleep_range(10000, 11000);
1396		spin_lock_irqsave(&master->queue_lock, flags);
1397	}
1398
1399	if (!list_empty(&master->queue) || master->busy)
1400		ret = -EBUSY;
1401	else
1402		master->running = false;
1403
1404	spin_unlock_irqrestore(&master->queue_lock, flags);
1405
1406	if (ret) {
1407		dev_warn(&master->dev,
1408			 "could not stop message queue\n");
1409		return ret;
1410	}
1411	return ret;
1412}
1413
1414static int spi_destroy_queue(struct spi_master *master)
1415{
1416	int ret;
1417
1418	ret = spi_stop_queue(master);
1419
1420	/*
1421	 * kthread_flush_worker will block until all work is done.
1422	 * If the reason that stop_queue timed out is that the work will never
1423	 * finish, then it does no good to call flush/stop thread, so
1424	 * return anyway.
1425	 */
1426	if (ret) {
1427		dev_err(&master->dev, "problem destroying queue\n");
1428		return ret;
1429	}
1430
1431	kthread_flush_worker(&master->kworker);
1432	kthread_stop(master->kworker_task);
1433
1434	return 0;
1435}
1436
1437static int __spi_queued_transfer(struct spi_device *spi,
1438				 struct spi_message *msg,
 1439				 bool need_pump)
1440{
1441	struct spi_master *master = spi->master;
1442	unsigned long flags;
1443
1444	spin_lock_irqsave(&master->queue_lock, flags);
1445
1446	if (!master->running) {
1447		spin_unlock_irqrestore(&master->queue_lock, flags);
1448		return -ESHUTDOWN;
1449	}
1450	msg->actual_length = 0;
1451	msg->status = -EINPROGRESS;
1452
1453	list_add_tail(&msg->queue, &master->queue);
1454	if (!master->busy && need_pump)
1455		kthread_queue_work(&master->kworker, &master->pump_messages);
1456
1457	spin_unlock_irqrestore(&master->queue_lock, flags);
1458	return 0;
1459}
1460
1461/**
1462 * spi_queued_transfer - transfer function for queued transfers
1463 * @spi: spi device which is requesting transfer
 1464 * @msg: spi message which is to be handled and queued to the driver queue
1465 *
1466 * Return: zero on success, else a negative error code.
1467 */
1468static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1469{
1470	return __spi_queued_transfer(spi, msg, true);
1471}
1472
1473static int spi_master_initialize_queue(struct spi_master *master)
1474{
1475	int ret;
1476
 
1477	master->transfer = spi_queued_transfer;
1478	if (!master->transfer_one_message)
1479		master->transfer_one_message = spi_transfer_one_message;
1480
1481	/* Initialize and start queue */
1482	ret = spi_init_queue(master);
1483	if (ret) {
1484		dev_err(&master->dev, "problem initializing queue\n");
1485		goto err_init_queue;
1486	}
1487	master->queued = true;
1488	ret = spi_start_queue(master);
1489	if (ret) {
1490		dev_err(&master->dev, "problem starting queue\n");
1491		goto err_start_queue;
1492	}
1493
1494	return 0;
1495
1496err_start_queue:
 
1497	spi_destroy_queue(master);
1498err_init_queue:
1499	return ret;
1500}
1501
1502/*-------------------------------------------------------------------------*/
1503
1504#if defined(CONFIG_OF)
1505static struct spi_device *
1506of_register_spi_device(struct spi_master *master, struct device_node *nc)
1507{
1508	struct spi_device *spi;
1509	int rc;
1510	u32 value;
1511
1512	/* Alloc an spi_device */
1513	spi = spi_alloc_device(master);
1514	if (!spi) {
1515		dev_err(&master->dev, "spi_device alloc error for %s\n",
1516			nc->full_name);
1517		rc = -ENOMEM;
1518		goto err_out;
1519	}
1520
1521	/* Select device driver */
1522	rc = of_modalias_node(nc, spi->modalias,
1523				sizeof(spi->modalias));
1524	if (rc < 0) {
1525		dev_err(&master->dev, "cannot find modalias for %s\n",
1526			nc->full_name);
1527		goto err_out;
1528	}
1529
1530	/* Device address */
1531	rc = of_property_read_u32(nc, "reg", &value);
1532	if (rc) {
1533		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1534			nc->full_name, rc);
1535		goto err_out;
1536	}
1537	spi->chip_select = value;
1538
1539	/* Mode (clock phase/polarity/etc.) */
1540	if (of_find_property(nc, "spi-cpha", NULL))
1541		spi->mode |= SPI_CPHA;
1542	if (of_find_property(nc, "spi-cpol", NULL))
1543		spi->mode |= SPI_CPOL;
1544	if (of_find_property(nc, "spi-cs-high", NULL))
1545		spi->mode |= SPI_CS_HIGH;
1546	if (of_find_property(nc, "spi-3wire", NULL))
1547		spi->mode |= SPI_3WIRE;
1548	if (of_find_property(nc, "spi-lsb-first", NULL))
1549		spi->mode |= SPI_LSB_FIRST;
1550
1551	/* Device DUAL/QUAD mode */
1552	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1553		switch (value) {
1554		case 1:
1555			break;
1556		case 2:
1557			spi->mode |= SPI_TX_DUAL;
1558			break;
1559		case 4:
1560			spi->mode |= SPI_TX_QUAD;
1561			break;
1562		default:
1563			dev_warn(&master->dev,
1564				"spi-tx-bus-width %d not supported\n",
1565				value);
1566			break;
1567		}
1568	}
1569
1570	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1571		switch (value) {
1572		case 1:
1573			break;
1574		case 2:
1575			spi->mode |= SPI_RX_DUAL;
1576			break;
1577		case 4:
1578			spi->mode |= SPI_RX_QUAD;
1579			break;
1580		default:
1581			dev_warn(&master->dev,
1582				"spi-rx-bus-width %d not supported\n",
1583				value);
1584			break;
1585		}
1586	}
1587
1588	/* Device speed */
1589	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1590	if (rc) {
1591		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1592			nc->full_name, rc);
1593		goto err_out;
1594	}
1595	spi->max_speed_hz = value;
1596
1597	/* Store a pointer to the node in the device structure */
1598	of_node_get(nc);
1599	spi->dev.of_node = nc;
1600
1601	/* Register the new device */
1602	rc = spi_add_device(spi);
1603	if (rc) {
1604		dev_err(&master->dev, "spi_device register error %s\n",
1605			nc->full_name);
1606		goto err_out;
1607	}
1608
1609	return spi;
1610
1611err_out:
1612	spi_dev_put(spi);
1613	return ERR_PTR(rc);
1614}
1615
1616/**
1617 * of_register_spi_devices() - Register child devices onto the SPI bus
1618 * @master:	Pointer to spi_master device
1619 *
1620 * Registers an spi_device for each child node of master node which has a 'reg'
1621 * property.
1622 */
1623static void of_register_spi_devices(struct spi_master *master)
1624{
1625	struct spi_device *spi;
 1626	struct device_node *nc;
1627
1628	if (!master->dev.of_node)
1629		return;
1630
1631	for_each_available_child_of_node(master->dev.of_node, nc) {
 1632		if (of_node_test_and_set_flag(nc, OF_POPULATED))
1633			continue;
1634		spi = of_register_spi_device(master, nc);
1635		if (IS_ERR(spi)) {
 1636			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1637				nc->full_name);
1638			of_node_clear_flag(nc, OF_POPULATED);
 
1639		}
1640	}
1641}
1642#else
1643static void of_register_spi_devices(struct spi_master *master) { }
1644#endif
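/*
 * Editorial illustration of a device tree fragment matching the properties
 * parsed above (names and values are placeholders):
 *
 *	&spi0 {
 *		sensor@1 {
 *			compatible = "vendor,foo-sensor";
 *			reg = <1>;			// chip select 1
 *			spi-max-frequency = <500000>;
 *			spi-cpha;
 *			spi-rx-bus-width = <2>;		// SPI_RX_DUAL
 *		};
 *	};
 */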
1645
1646#ifdef CONFIG_ACPI
1647static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1648{
1649	struct spi_device *spi = data;
 1650	struct spi_master *master = spi->master;
1651
1652	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
 1653		struct acpi_resource_spi_serialbus *sb;
1654
1655		sb = &ares->data.spi_serial_bus;
1656		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1657			/*
1658			 * ACPI DeviceSelection numbering is handled by the
1659			 * host controller driver in Windows and can vary
1660			 * from driver to driver. In Linux we always expect
1661			 * 0 .. max - 1 so we need to ask the driver to
1662			 * translate between the two schemes.
1663			 */
1664			if (master->fw_translate_cs) {
1665				int cs = master->fw_translate_cs(master,
1666						sb->device_selection);
1667				if (cs < 0)
1668					return cs;
1669				spi->chip_select = cs;
1670			} else {
1671				spi->chip_select = sb->device_selection;
1672			}
1673
1674			spi->max_speed_hz = sb->connection_speed;
1675
1676			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1677				spi->mode |= SPI_CPHA;
1678			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1679				spi->mode |= SPI_CPOL;
1680			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
 1681				spi->mode |= SPI_CS_HIGH;
1682		}
1683	} else if (spi->irq < 0) {
1684		struct resource r;
1685
1686		if (acpi_dev_resource_interrupt(ares, 0, &r))
1687			spi->irq = r.start;
1688	}
1689
1690	/* Always tell the ACPI core to skip this resource */
1691	return 1;
1692}
1693
1694static acpi_status acpi_register_spi_device(struct spi_master *master,
1695					    struct acpi_device *adev)
1696{
1697	struct list_head resource_list;
1698	struct spi_device *spi;
1699	int ret;
1700
1701	if (acpi_bus_get_status(adev) || !adev->status.present ||
1702	    acpi_device_enumerated(adev))
1703		return AE_OK;
1704
1705	spi = spi_alloc_device(master);
1706	if (!spi) {
1707		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1708			dev_name(&adev->dev));
1709		return AE_NO_MEMORY;
1710	}
1711
1712	ACPI_COMPANION_SET(&spi->dev, adev);
1713	spi->irq = -1;
1714
1715	INIT_LIST_HEAD(&resource_list);
1716	ret = acpi_dev_get_resources(adev, &resource_list,
1717				     acpi_spi_add_resource, spi);
1718	acpi_dev_free_resource_list(&resource_list);
1719
1720	if (ret < 0 || !spi->max_speed_hz) {
1721		spi_dev_put(spi);
1722		return AE_OK;
1723	}
1724
1725	if (spi->irq < 0)
1726		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1727
1728	acpi_device_set_enumerated(adev);
1729
1730	adev->power.flags.ignore_parent = true;
1731	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1732	if (spi_add_device(spi)) {
1733		adev->power.flags.ignore_parent = false;
1734		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1735			dev_name(&adev->dev));
1736		spi_dev_put(spi);
1737	}
1738
1739	return AE_OK;
1740}
1741
1742static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1743				       void *data, void **return_value)
1744{
1745	struct spi_master *master = data;
1746	struct acpi_device *adev;
1747
1748	if (acpi_bus_get_device(handle, &adev))
1749		return AE_OK;
1750
1751	return acpi_register_spi_device(master, adev);
1752}
1753
1754static void acpi_register_spi_devices(struct spi_master *master)
1755{
1756	acpi_status status;
1757	acpi_handle handle;
1758
1759	handle = ACPI_HANDLE(master->dev.parent);
1760	if (!handle)
1761		return;
1762
1763	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1764				     acpi_spi_add_device, NULL,
1765				     master, NULL);
1766	if (ACPI_FAILURE(status))
1767		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1768}
1769#else
1770static inline void acpi_register_spi_devices(struct spi_master *master) {}
1771#endif /* CONFIG_ACPI */
1772
1773static void spi_master_release(struct device *dev)
1774{
1775	struct spi_master *master;
1776
1777	master = container_of(dev, struct spi_master, dev);
1778	kfree(master);
1779}
1780
1781static struct class spi_master_class = {
1782	.name		= "spi_master",
1783	.owner		= THIS_MODULE,
1784	.dev_release	= spi_master_release,
1785	.dev_groups	= spi_master_groups,
1786};
1787
1788
 
1789/**
1790 * spi_alloc_master - allocate SPI master controller
1791 * @dev: the controller, possibly using the platform_bus
1792 * @size: how much zeroed driver-private data to allocate; the pointer to this
1793 *	memory is in the driver_data field of the returned device,
1794 *	accessible with spi_master_get_devdata().
1795 * Context: can sleep
1796 *
1797 * This call is used only by SPI master controller drivers, which are the
1798 * only ones directly touching chip registers.  It's how they allocate
1799 * an spi_master structure, prior to calling spi_register_master().
1800 *
1801 * This must be called from context that can sleep.
 
1802 *
1803 * The caller is responsible for assigning the bus number and initializing
1804 * the master's methods before calling spi_register_master(); and (after errors
1805 * adding the device) calling spi_master_put() to prevent a memory leak.
1806 *
1807 * Return: the SPI master structure on success, else NULL.
1808 */
1809struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1810{
1811	struct spi_master	*master;
1812
1813	if (!dev)
1814		return NULL;
1815
1816	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1817	if (!master)
1818		return NULL;
1819
1820	device_initialize(&master->dev);
1821	master->bus_num = -1;
1822	master->num_chipselect = 1;
1823	master->dev.class = &spi_master_class;
1824	master->dev.parent = dev;
1825	pm_suspend_ignore_children(&master->dev, true);
1826	spi_master_set_devdata(master, &master[1]);
1827
1828	return master;
1829}
1830EXPORT_SYMBOL_GPL(spi_alloc_master);
1831
1832#ifdef CONFIG_OF
1833static int of_spi_register_master(struct spi_master *master)
1834{
1835	int nb, i, *cs;
1836	struct device_node *np = master->dev.of_node;
1837
1838	if (!np)
1839		return 0;
1840
1841	nb = of_gpio_named_count(np, "cs-gpios");
1842	master->num_chipselect = max_t(int, nb, master->num_chipselect);
1843
1844	/* Return error only for an incorrectly formed cs-gpios property */
1845	if (nb == 0 || nb == -ENOENT)
1846		return 0;
1847	else if (nb < 0)
1848		return nb;
1849
1850	cs = devm_kzalloc(&master->dev,
1851			  sizeof(int) * master->num_chipselect,
1852			  GFP_KERNEL);
1853	master->cs_gpios = cs;
1854
1855	if (!master->cs_gpios)
1856		return -ENOMEM;
1857
1858	for (i = 0; i < master->num_chipselect; i++)
1859		cs[i] = -ENOENT;
1860
1861	for (i = 0; i < nb; i++)
1862		cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1863
1864	return 0;
1865}
1866#else
1867static int of_spi_register_master(struct spi_master *master)
1868{
1869	return 0;
1870}
1871#endif
1872
1873/**
1874 * spi_register_master - register SPI master controller
1875 * @master: initialized master, originally from spi_alloc_master()
1876 * Context: can sleep
1877 *
1878 * SPI master controllers connect to their drivers using some non-SPI bus,
1879 * such as the platform bus.  The final stage of probe() in that code
1880 * includes calling spi_register_master() to hook up to this SPI bus glue.
1881 *
1882 * SPI controllers use board specific (often SOC specific) bus numbers,
1883 * and board-specific addressing for SPI devices combines those numbers
1884 * with chip select numbers.  Since SPI does not directly support dynamic
1885 * device identification, boards need configuration tables telling which
1886 * chip is at which address.
1887 *
1888 * This must be called from context that can sleep.  It returns zero on
1889 * success, else a negative error code (dropping the master's refcount).
1890 * After a successful return, the caller is responsible for calling
1891 * spi_unregister_master().
1892 *
1893 * Return: zero on success, else a negative error code.
1894 */
1895int spi_register_master(struct spi_master *master)
1896{
1897	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1898	struct device		*dev = master->dev.parent;
1899	struct boardinfo	*bi;
1900	int			status = -ENODEV;
1901	int			dynamic = 0;
1902
1903	if (!dev)
1904		return -ENODEV;
1905
1906	status = of_spi_register_master(master);
1907	if (status)
1908		return status;
1909
1910	/* even if it's just one always-selected device, there must
1911	 * be at least one chipselect
1912	 */
1913	if (master->num_chipselect == 0)
1914		return -EINVAL;
1915
1916	if ((master->bus_num < 0) && master->dev.of_node)
1917		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1918
1919	/* convention:  dynamically assigned bus IDs count down from the max */
1920	if (master->bus_num < 0) {
1921		/* FIXME switch to an IDR based scheme, something like
1922		 * I2C now uses, so we can't run out of "dynamic" IDs
1923		 */
1924		master->bus_num = atomic_dec_return(&dyn_bus_id);
1925		dynamic = 1;
1926	}
1927
1928	INIT_LIST_HEAD(&master->queue);
1929	spin_lock_init(&master->queue_lock);
1930	spin_lock_init(&master->bus_lock_spinlock);
1931	mutex_init(&master->bus_lock_mutex);
1932	mutex_init(&master->io_mutex);
1933	master->bus_lock_flag = 0;
1934	init_completion(&master->xfer_completion);
1935	if (!master->max_dma_len)
1936		master->max_dma_len = INT_MAX;
1937
1938	/* register the device, then userspace will see it.
1939	 * registration fails if the bus ID is in use.
1940	 */
1941	dev_set_name(&master->dev, "spi%u", master->bus_num);
1942	status = device_add(&master->dev);
1943	if (status < 0)
1944		goto done;
1945	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1946			dynamic ? " (dynamic)" : "");
1947
1948	/* If we're using a queued driver, start the queue */
1949	if (master->transfer)
1950		dev_info(dev, "master is unqueued, this is deprecated\n");
1951	else {
1952		status = spi_master_initialize_queue(master);
1953		if (status) {
1954			device_del(&master->dev);
1955			goto done;
1956		}
1957	}
1958	/* add statistics */
1959	spin_lock_init(&master->statistics.lock);
1960
1961	mutex_lock(&board_lock);
1962	list_add_tail(&master->list, &spi_master_list);
1963	list_for_each_entry(bi, &board_list, list)
1964		spi_match_master_to_boardinfo(master, &bi->board_info);
1965	mutex_unlock(&board_lock);
1966
1967	/* Register devices from the device tree and ACPI */
1968	of_register_spi_devices(master);
1969	acpi_register_spi_devices(master);
1970done:
1971	return status;
1972}
1973EXPORT_SYMBOL_GPL(spi_register_master);
1974
1975static void devm_spi_unregister(struct device *dev, void *res)
1976{
1977	spi_unregister_master(*(struct spi_master **)res);
1978}
1979
1980/**
1981 * devm_spi_register_master - register managed SPI master controller
1982 * @dev:    device managing SPI master
1983 * @master: initialized master, originally from spi_alloc_master()
1984 * Context: can sleep
1985 *
1986 * Register a SPI master controller as with spi_register_master() which will
1987 * automatically be unregistered when @dev is unbound from its driver.
1988 *
1989 * Return: zero on success, else a negative error code.
1990 */
1991int devm_spi_register_master(struct device *dev, struct spi_master *master)
1992{
1993	struct spi_master **ptr;
1994	int ret;
1995
1996	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1997	if (!ptr)
1998		return -ENOMEM;
1999
2000	ret = spi_register_master(master);
2001	if (!ret) {
2002		*ptr = master;
2003		devres_add(dev, ptr);
2004	} else {
2005		devres_free(ptr);
2006	}
2007
2008	return ret;
2009}
2010EXPORT_SYMBOL_GPL(devm_spi_register_master);
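
/*
 * Usage sketch (illustrative only, not part of this driver): a typical
 * controller driver allocates, configures and registers its master from
 * probe() roughly as below.  The foo_* names, struct foo_spi and the
 * transfer_one/set_cs callbacks are hypothetical, and error handling is
 * abbreviated.
 *
 *	static int foo_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		int ret;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		master->dev.of_node = pdev->dev.of_node;
 *		master->bus_num = -1;
 *		master->num_chipselect = 2;
 *		master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		master->bits_per_word_mask = SPI_BPW_MASK(8);
 *		master->transfer_one = foo_spi_transfer_one;
 *		master->set_cs = foo_spi_set_cs;
 *
 *		ret = devm_spi_register_master(&pdev->dev, master);
 *		if (ret)
 *			spi_master_put(master);
 *		return ret;
 *	}
 */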
2011
2012static int __unregister(struct device *dev, void *null)
2013{
2014	spi_unregister_device(to_spi_device(dev));
2015	return 0;
2016}
2017
2018/**
2019 * spi_unregister_master - unregister SPI master controller
2020 * @master: the master being unregistered
2021 * Context: can sleep
2022 *
2023 * This call is used only by SPI master controller drivers, which are the
2024 * only ones directly touching chip registers.
2025 *
2026 * This must be called from context that can sleep.
2027 */
2028void spi_unregister_master(struct spi_master *master)
2029{
2030	int dummy;
2031
2032	if (master->queued) {
2033		if (spi_destroy_queue(master))
2034			dev_err(&master->dev, "queue remove failed\n");
2035	}
2036
2037	mutex_lock(&board_lock);
2038	list_del(&master->list);
2039	mutex_unlock(&board_lock);
2040
2041	dummy = device_for_each_child(&master->dev, NULL, __unregister);
2042	device_unregister(&master->dev);
2043}
2044EXPORT_SYMBOL_GPL(spi_unregister_master);
2045
2046int spi_master_suspend(struct spi_master *master)
2047{
2048	int ret;
2049
2050	/* Basically no-ops for non-queued masters */
2051	if (!master->queued)
2052		return 0;
2053
2054	ret = spi_stop_queue(master);
2055	if (ret)
2056		dev_err(&master->dev, "queue stop failed\n");
2057
2058	return ret;
2059}
2060EXPORT_SYMBOL_GPL(spi_master_suspend);
2061
2062int spi_master_resume(struct spi_master *master)
2063{
2064	int ret;
2065
2066	if (!master->queued)
2067		return 0;
2068
2069	ret = spi_start_queue(master);
2070	if (ret)
2071		dev_err(&master->dev, "queue restart failed\n");
2072
2073	return ret;
2074}
2075EXPORT_SYMBOL_GPL(spi_master_resume);
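
/*
 * Usage sketch (illustrative only): a controller driver's system sleep
 * hooks usually wrap these helpers around their own hardware handling,
 * e.g. gating the controller clock once the queue has been stopped.  The
 * foo_* names and the clk member are hypothetical; the master pointer is
 * assumed to have been stored with platform_set_drvdata() during probe().
 *
 *	static int foo_spi_suspend(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *		struct foo_spi *fs = spi_master_get_devdata(master);
 *		int ret;
 *
 *		ret = spi_master_suspend(master);
 *		if (ret)
 *			return ret;
 *
 *		clk_disable_unprepare(fs->clk);
 *		return 0;
 *	}
 *
 *	static int foo_spi_resume(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *		struct foo_spi *fs = spi_master_get_devdata(master);
 *
 *		clk_prepare_enable(fs->clk);
 *		return spi_master_resume(master);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);
 */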
2076
2077static int __spi_master_match(struct device *dev, const void *data)
2078{
2079	struct spi_master *m;
2080	const u16 *bus_num = data;
2081
2082	m = container_of(dev, struct spi_master, dev);
2083	return m->bus_num == *bus_num;
2084}
2085
2086/**
2087 * spi_busnum_to_master - look up master associated with bus_num
2088 * @bus_num: the master's bus number
2089 * Context: can sleep
2090 *
2091 * This call may be used with devices that are registered after
2092 * arch init time.  It returns a refcounted pointer to the relevant
2093 * spi_master (which the caller must release), or NULL if there is
2094 * no such master registered.
2095 *
2096 * Return: the SPI master structure on success, else NULL.
2097 */
2098struct spi_master *spi_busnum_to_master(u16 bus_num)
2099{
2100	struct device		*dev;
2101	struct spi_master	*master = NULL;
2102
2103	dev = class_find_device(&spi_master_class, NULL, &bus_num,
2104				__spi_master_match);
2105	if (dev)
2106		master = container_of(dev, struct spi_master, dev);
2107	/* reference got in class_find_device */
2108	return master;
2109}
2110EXPORT_SYMBOL_GPL(spi_busnum_to_master);
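
/*
 * Usage sketch (illustrative only): board setup or test code can look up
 * a controller by bus number, for example to instantiate a chip with
 * spi_new_device(); the reference taken by the lookup must be dropped when
 * done.  The bus number "1" and chip_board_info below are placeholders
 * for a real bus number and a struct spi_board_info describing the chip.
 *
 *	struct spi_master *master;
 *	struct spi_device *new_dev;
 *
 *	master = spi_busnum_to_master(1);
 *	if (master) {
 *		new_dev = spi_new_device(master, &chip_board_info);
 *		spi_master_put(master);
 *	}
 */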
2111
2112/*-------------------------------------------------------------------------*/
2113
2114/* Core methods for SPI resource management */
2115
2116/**
2117 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2118 *                 during the processing of a spi_message while using
2119 *                 spi_transfer_one
2120 * @spi:     the spi device for which we allocate memory
2121 * @release: the release code to execute for this resource
2122 * @size:    size to alloc and return
2123 * @gfp:     GFP allocation flags
2124 *
2125 * Return: the pointer to the allocated data
2126 *
2127 * This may get enhanced in the future to allocate from a memory pool
2128 * of the @spi_device or @spi_master to avoid repeated allocations.
2129 */
2130void *spi_res_alloc(struct spi_device *spi,
2131		    spi_res_release_t release,
2132		    size_t size, gfp_t gfp)
2133{
2134	struct spi_res *sres;
2135
2136	sres = kzalloc(sizeof(*sres) + size, gfp);
2137	if (!sres)
2138		return NULL;
2139
2140	INIT_LIST_HEAD(&sres->entry);
2141	sres->release = release;
2142
2143	return sres->data;
2144}
2145EXPORT_SYMBOL_GPL(spi_res_alloc);
2146
2147/**
2148 * spi_res_free - free an spi resource
2149 * @res: pointer to the custom data of a resource
2150 *
2151 */
2152void spi_res_free(void *res)
2153{
2154	struct spi_res *sres = container_of(res, struct spi_res, data);
2155
2156	if (!res)
2157		return;
2158
2159	WARN_ON(!list_empty(&sres->entry));
2160	kfree(sres);
2161}
2162EXPORT_SYMBOL_GPL(spi_res_free);
2163
2164/**
2165 * spi_res_add - add a spi_res to the spi_message
2166 * @message: the spi message
2167 * @res:     the spi_resource
2168 */
2169void spi_res_add(struct spi_message *message, void *res)
2170{
2171	struct spi_res *sres = container_of(res, struct spi_res, data);
2172
2173	WARN_ON(!list_empty(&sres->entry));
2174	list_add_tail(&sres->entry, &message->resources);
2175}
2176EXPORT_SYMBOL_GPL(spi_res_add);
2177
2178/**
2179 * spi_res_release - release all spi resources for this message
2180 * @master:  the @spi_master
2181 * @message: the @spi_message
2182 */
2183void spi_res_release(struct spi_master *master,
2184		     struct spi_message *message)
2185{
2186	struct spi_res *res;
2187
2188	while (!list_empty(&message->resources)) {
2189		res = list_last_entry(&message->resources,
2190				      struct spi_res, entry);
2191
2192		if (res->release)
2193			res->release(master, message, res->data);
2194
2195		list_del(&res->entry);
2196
2197		kfree(res);
2198	}
2199}
2200EXPORT_SYMBOL_GPL(spi_res_release);
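
/*
 * Usage sketch (illustrative only): code that rewrites a message while it
 * is being processed can tie temporary data to that message with spi_res,
 * so the data is released automatically when spi_res_release() runs at
 * message completion.  The foo_* names, the scratch buffer and @len are
 * hypothetical.
 *
 *	static void foo_scratch_release(struct spi_master *master,
 *					struct spi_message *msg, void *res)
 *	{
 *		kfree(*(void **)res);
 *	}
 *
 *	static int foo_attach_scratch(struct spi_message *msg, size_t len)
 *	{
 *		void **scratch;
 *
 *		scratch = spi_res_alloc(msg->spi, foo_scratch_release,
 *					sizeof(*scratch), GFP_KERNEL);
 *		if (!scratch)
 *			return -ENOMEM;
 *
 *		*scratch = kzalloc(len, GFP_KERNEL);
 *		if (!*scratch) {
 *			spi_res_free(scratch);
 *			return -ENOMEM;
 *		}
 *
 *		spi_res_add(msg, scratch);
 *		return 0;
 *	}
 */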
2201
2202/*-------------------------------------------------------------------------*/
2203
2204/* Core methods for spi_message alterations */
2205
2206static void __spi_replace_transfers_release(struct spi_master *master,
2207					    struct spi_message *msg,
2208					    void *res)
2209{
2210	struct spi_replaced_transfers *rxfer = res;
2211	size_t i;
2212
2213	/* call extra callback if requested */
2214	if (rxfer->release)
2215		rxfer->release(master, msg, res);
2216
2217	/* insert replaced transfers back into the message */
2218	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2219
2220	/* remove the formerly inserted entries */
2221	for (i = 0; i < rxfer->inserted; i++)
2222		list_del(&rxfer->inserted_transfers[i].transfer_list);
2223}
2224
2225/**
2226 * spi_replace_transfers - replace transfers with several transfers
2227 *                         and register change with spi_message.resources
2228 * @msg:           the spi_message we work upon
2229 * @xfer_first:    the first spi_transfer we want to replace
2230 * @remove:        number of transfers to remove
2231 * @insert:        the number of transfers we want to insert instead
2232 * @release:       extra release code necessary in some circumstances
2233 * @extradatasize: extra data to allocate (with alignment guarantees
2234 *                 of struct @spi_transfer)
2235 * @gfp:           gfp flags
2236 *
2237 * Returns: pointer to @spi_replaced_transfers,
2238 *          PTR_ERR(...) in case of errors.
2239 */
2240struct spi_replaced_transfers *spi_replace_transfers(
2241	struct spi_message *msg,
2242	struct spi_transfer *xfer_first,
2243	size_t remove,
2244	size_t insert,
2245	spi_replaced_release_t release,
2246	size_t extradatasize,
2247	gfp_t gfp)
2248{
2249	struct spi_replaced_transfers *rxfer;
2250	struct spi_transfer *xfer;
2251	size_t i;
2252
2253	/* allocate the structure using spi_res */
2254	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2255			      insert * sizeof(struct spi_transfer)
2256			      + sizeof(struct spi_replaced_transfers)
2257			      + extradatasize,
2258			      gfp);
2259	if (!rxfer)
2260		return ERR_PTR(-ENOMEM);
2261
2262	/* the release code to invoke before running the generic release */
2263	rxfer->release = release;
2264
2265	/* assign extradata */
2266	if (extradatasize)
2267		rxfer->extradata =
2268			&rxfer->inserted_transfers[insert];
2269
2270	/* init the replaced_transfers list */
2271	INIT_LIST_HEAD(&rxfer->replaced_transfers);
2272
2273	/* assign the list_entry after which we should reinsert
2274	 * the @replaced_transfers - it may be spi_message.messages!
2275	 */
2276	rxfer->replaced_after = xfer_first->transfer_list.prev;
2277
2278	/* remove the requested number of transfers */
2279	for (i = 0; i < remove; i++) {
2280		/* if the entry after replaced_after is msg->transfers
2281		 * then we have been requested to remove more transfers
2282		 * than are in the list
2283		 */
2284		if (rxfer->replaced_after->next == &msg->transfers) {
2285			dev_err(&msg->spi->dev,
2286				"requested to remove more spi_transfers than are available\n");
2287			/* insert replaced transfers back into the message */
2288			list_splice(&rxfer->replaced_transfers,
2289				    rxfer->replaced_after);
2290
2291			/* free the spi_replace_transfer structure */
2292			spi_res_free(rxfer);
2293
2294			/* and return with an error */
2295			return ERR_PTR(-EINVAL);
2296		}
2297
2298		/* remove the entry after replaced_after from list of
2299		 * transfers and add it to list of replaced_transfers
2300		 */
2301		list_move_tail(rxfer->replaced_after->next,
2302			       &rxfer->replaced_transfers);
2303	}
2304
2305	/* create copy of the given xfer with identical settings
2306	 * based on the first transfer to get removed
2307	 */
2308	for (i = 0; i < insert; i++) {
2309		/* we need to run in reverse order */
2310		xfer = &rxfer->inserted_transfers[insert - 1 - i];
2311
2312		/* copy all spi_transfer data */
2313		memcpy(xfer, xfer_first, sizeof(*xfer));
2314
2315		/* add to list */
2316		list_add(&xfer->transfer_list, rxfer->replaced_after);
2317
2318		/* clear cs_change and delay_usecs for all but the last */
2319		if (i) {
2320			xfer->cs_change = false;
2321			xfer->delay_usecs = 0;
2322		}
2323	}
2324
2325	/* set up inserted */
2326	rxfer->inserted = insert;
2327
2328	/* and register it with spi_res/spi_message */
2329	spi_res_add(msg, rxfer);
2330
2331	return rxfer;
2332}
2333EXPORT_SYMBOL_GPL(spi_replace_transfers);
2334
2335static int __spi_split_transfer_maxsize(struct spi_master *master,
2336					struct spi_message *msg,
2337					struct spi_transfer **xferp,
2338					size_t maxsize,
2339					gfp_t gfp)
2340{
2341	struct spi_transfer *xfer = *xferp, *xfers;
2342	struct spi_replaced_transfers *srt;
2343	size_t offset;
2344	size_t count, i;
2345
2346	/* warn once that we are splitting a transfer */
2347	dev_warn_once(&msg->spi->dev,
2348		      "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
2349		      xfer->len, maxsize);
2350
2351	/* calculate how many we have to replace */
2352	count = DIV_ROUND_UP(xfer->len, maxsize);
2353
2354	/* create replacement */
2355	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2356	if (IS_ERR(srt))
2357		return PTR_ERR(srt);
2358	xfers = srt->inserted_transfers;
2359
2360	/* now handle each of those newly inserted spi_transfers
2361	 * note that the replacement spi_transfers are all preset
2362	 * to the same values as *xferp, so tx_buf, rx_buf and len
2363	 * are all identical (as well as most others)
2364	 * so we just have to fix up len and the pointers.
2365	 *
2366	 * this also includes support for the deprecated
2367	 * spi_message.is_dma_mapped interface
2368	 */
2369
2370	/* the first transfer just needs the length modified, so we
2371	 * run it outside the loop
2372	 */
2373	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2374
2375	/* all the others need rx_buf/tx_buf also set */
2376	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2377		/* update rx_buf, tx_buf and dma */
2378		if (xfers[i].rx_buf)
2379			xfers[i].rx_buf += offset;
2380		if (xfers[i].rx_dma)
2381			xfers[i].rx_dma += offset;
2382		if (xfers[i].tx_buf)
2383			xfers[i].tx_buf += offset;
2384		if (xfers[i].tx_dma)
2385			xfers[i].tx_dma += offset;
2386
2387		/* update length */
2388		xfers[i].len = min(maxsize, xfers[i].len - offset);
2389	}
2390
2391	/* we set up xferp to the last entry we have inserted,
2392	 * so that we skip those already split transfers
2393	 */
2394	*xferp = &xfers[count - 1];
2395
2396	/* increment statistics counters */
2397	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2398				       transfers_split_maxsize);
2399	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2400				       transfers_split_maxsize);
2401
2402	return 0;
2403}
2404
2405/**
2406 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2407 *                              when an individual transfer exceeds a
2408 *                              certain size
2409 * @master:    the @spi_master for this transfer
2410 * @msg:   the @spi_message to transform
2411 * @maxsize:  the maximum length an individual transfer may have before it is split
2412 * @gfp: GFP allocation flags
2413 *
2414 * Return: status of transformation
2415 */
2416int spi_split_transfers_maxsize(struct spi_master *master,
2417				struct spi_message *msg,
2418				size_t maxsize,
2419				gfp_t gfp)
2420{
2421	struct spi_transfer *xfer;
2422	int ret;
2423
2424	/* iterate over the transfer_list,
2425	 * but note that xfer is advanced to the last transfer inserted
2426	 * to avoid checking sizes again unnecessarily (also, xfer may
2427	 * belong to a different list by the time the
2428	 * replacement has happened)
2429	 */
2430	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2431		if (xfer->len > maxsize) {
2432			ret = __spi_split_transfer_maxsize(
2433				master, msg, &xfer, maxsize, gfp);
2434			if (ret)
2435				return ret;
2436		}
2437	}
2438
2439	return 0;
2440}
2441EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
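
/*
 * Usage sketch (illustrative only): a controller whose DMA engine cannot
 * move more than a fixed number of bytes per transfer can apply the
 * splitting from its prepare_message() callback, with
 * master->prepare_message = foo_spi_prepare_message set in probe().
 * FOO_MAX_DMA_LEN and the foo_* name are hypothetical.
 *
 *	static int foo_spi_prepare_message(struct spi_master *master,
 *					   struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(master, msg,
 *						   FOO_MAX_DMA_LEN, GFP_KERNEL);
 *	}
 */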
2442
2443/*-------------------------------------------------------------------------*/
2444
2445/* Core methods for SPI master protocol drivers.  Some of the
2446 * other core methods are currently defined as inline functions.
2447 */
2448
2449static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
2450{
2451	if (master->bits_per_word_mask) {
2452		/* Only 32 bits fit in the mask */
2453		if (bits_per_word > 32)
2454			return -EINVAL;
2455		if (!(master->bits_per_word_mask &
2456				SPI_BPW_MASK(bits_per_word)))
2457			return -EINVAL;
2458	}
2459
2460	return 0;
2461}
2462
2463/**
2464 * spi_setup - setup SPI mode and clock rate
2465 * @spi: the device whose settings are being modified
2466 * Context: can sleep, and no requests are queued to the device
2467 *
2468 * SPI protocol drivers may need to update the transfer mode if the
2469 * device doesn't work with its default.  They may likewise need
2470 * to update clock rates or word sizes from initial values.  This function
2471 * changes those settings, and must be called from a context that can sleep.
2472 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2473 * effect the next time the device is selected and data is transferred to
2474 * or from it.  When this function returns, the spi device is deselected.
2475 *
2476 * Note that this call will fail if the protocol driver specifies an option
2477 * that the underlying controller or its driver does not support.  For
2478 * example, not all hardware supports wire transfers using nine bit words,
2479 * LSB-first wire encoding, or active-high chipselects.
2480 *
2481 * Return: zero on success, else a negative error code.
2482 */
2483int spi_setup(struct spi_device *spi)
2484{
2485	unsigned	bad_bits, ugly_bits;
2486	int		status;
2487
2488	/* check mode to prevent DUAL and QUAD from being set at the same time
2489	 */
2490	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2491		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2492		dev_err(&spi->dev,
2493		"setup: cannot select dual and quad at the same time\n");
2494		return -EINVAL;
2495	}
2496	/* if SPI_3WIRE mode is selected, DUAL and QUAD must not be set
2497	 */
2498	if ((spi->mode & SPI_3WIRE) && (spi->mode &
2499		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2500		return -EINVAL;
2501	/* help drivers fail *cleanly* when they need options
2502	 * that aren't supported with their current master
2503	 */
2504	bad_bits = spi->mode & ~spi->master->mode_bits;
2505	ugly_bits = bad_bits &
2506		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2507	if (ugly_bits) {
2508		dev_warn(&spi->dev,
2509			 "setup: ignoring unsupported mode bits %x\n",
2510			 ugly_bits);
2511		spi->mode &= ~ugly_bits;
2512		bad_bits &= ~ugly_bits;
2513	}
2514	if (bad_bits) {
2515		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2516			bad_bits);
2517		return -EINVAL;
2518	}
2519
2520	if (!spi->bits_per_word)
2521		spi->bits_per_word = 8;
2522
2523	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
2524	if (status)
2525		return status;
2526
2527	if (!spi->max_speed_hz)
2528		spi->max_speed_hz = spi->master->max_speed_hz;
2529
2530	if (spi->master->setup)
2531		status = spi->master->setup(spi);
2532
2533	spi_set_cs(spi, false);
2534
2535	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2536			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2537			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2538			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2539			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
2540			(spi->mode & SPI_LOOP) ? "loopback, " : "",
2541			spi->bits_per_word, spi->max_speed_hz,
2542			status);
2543
2544	return status;
2545}
2546EXPORT_SYMBOL_GPL(spi_setup);
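
/*
 * Usage sketch (illustrative only): a protocol driver's probe() typically
 * adjusts the settings its chip needs and then calls spi_setup().  The
 * foo_* name and the particular mode, word size and clock rate below are
 * hypothetical.
 *
 *	static int foo_chip_probe(struct spi_device *spi)
 *	{
 *		int ret;
 *
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		ret = spi_setup(spi);
 *		if (ret)
 *			dev_err(&spi->dev, "spi_setup failed: %d\n", ret);
 *		return ret;
 *	}
 */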
2547
2548static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2549{
2550	struct spi_master *master = spi->master;
2551	struct spi_transfer *xfer;
2552	int w_size;
2553
2554	if (list_empty(&message->transfers))
2555		return -EINVAL;
2556
2557	/* Half-duplex links include original MicroWire, and ones with
2558	 * only one data pin like SPI_3WIRE (switches direction) or where
2559	 * either MOSI or MISO is missing.  They can also be caused by
2560	 * software limitations.
2561	 */
2562	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
2563			|| (spi->mode & SPI_3WIRE)) {
2564		unsigned flags = master->flags;
2565
2566		list_for_each_entry(xfer, &message->transfers, transfer_list) {
2567			if (xfer->rx_buf && xfer->tx_buf)
2568				return -EINVAL;
2569			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
2570				return -EINVAL;
2571			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
2572				return -EINVAL;
2573		}
2574	}
2575
2576	/*
2577	 * Set transfer bits_per_word and max speed as spi device default if
2578	 * it is not set for this transfer.
2579	 * Set transfer tx_nbits and rx_nbits as single transfer default
2580	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2581	 */
2582	message->frame_length = 0;
2583	list_for_each_entry(xfer, &message->transfers, transfer_list) {
2584		message->frame_length += xfer->len;
2585		if (!xfer->bits_per_word)
2586			xfer->bits_per_word = spi->bits_per_word;
2587
2588		if (!xfer->speed_hz)
2589			xfer->speed_hz = spi->max_speed_hz;
2590		if (!xfer->speed_hz)
2591			xfer->speed_hz = master->max_speed_hz;
2592
2593		if (master->max_speed_hz &&
2594		    xfer->speed_hz > master->max_speed_hz)
2595			xfer->speed_hz = master->max_speed_hz;
2596
2597		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
2598			return -EINVAL;
2599
2600		/*
2601		 * SPI transfer length should be a multiple of the SPI word size,
2602		 * where the word size is rounded up to a power-of-two number of bytes
2603		 */
2604		if (xfer->bits_per_word <= 8)
2605			w_size = 1;
2606		else if (xfer->bits_per_word <= 16)
2607			w_size = 2;
2608		else
2609			w_size = 4;
2610
2611		/* No partial transfers accepted */
2612		if (xfer->len % w_size)
2613			return -EINVAL;
2614
2615		if (xfer->speed_hz && master->min_speed_hz &&
2616		    xfer->speed_hz < master->min_speed_hz)
2617			return -EINVAL;
2618
2619		if (xfer->tx_buf && !xfer->tx_nbits)
2620			xfer->tx_nbits = SPI_NBITS_SINGLE;
2621		if (xfer->rx_buf && !xfer->rx_nbits)
2622			xfer->rx_nbits = SPI_NBITS_SINGLE;
2623		/* check transfer tx/rx_nbits:
2624		 * 1. check the value matches one of single, dual and quad
2625		 * 2. check tx/rx_nbits match the mode in spi_device
2626		 */
2627		if (xfer->tx_buf) {
2628			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2629				xfer->tx_nbits != SPI_NBITS_DUAL &&
2630				xfer->tx_nbits != SPI_NBITS_QUAD)
2631				return -EINVAL;
2632			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2633				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2634				return -EINVAL;
2635			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2636				!(spi->mode & SPI_TX_QUAD))
2637				return -EINVAL;
2638		}
2639		/* check transfer rx_nbits */
2640		if (xfer->rx_buf) {
2641			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2642				xfer->rx_nbits != SPI_NBITS_DUAL &&
2643				xfer->rx_nbits != SPI_NBITS_QUAD)
2644				return -EINVAL;
2645			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2646				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2647				return -EINVAL;
2648			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2649				!(spi->mode & SPI_RX_QUAD))
2650				return -EINVAL;
2651		}
2652	}
2653
2654	message->status = -EINPROGRESS;
2655
2656	return 0;
2657}
2658
2659static int __spi_async(struct spi_device *spi, struct spi_message *message)
2660{
2661	struct spi_master *master = spi->master;
2662
2663	message->spi = spi;
2664
2665	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2666	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2667
2668	trace_spi_message_submit(message);
2669
2670	return master->transfer(spi, message);
2671}
2672
2673/**
2674 * spi_async - asynchronous SPI transfer
2675 * @spi: device with which data will be exchanged
2676 * @message: describes the data transfers, including completion callback
2677 * Context: any (irqs may be blocked, etc)
2678 *
2679 * This call may be used in_irq and other contexts which can't sleep,
2680 * as well as from task contexts which can sleep.
2681 *
2682 * The completion callback is invoked in a context which can't sleep.
2683 * Before that invocation, the value of message->status is undefined.
2684 * When the callback is issued, message->status holds either zero (to
2685 * indicate complete success) or a negative error code.  After that
2686 * callback returns, the driver which issued the transfer request may
2687 * deallocate the associated memory; it's no longer in use by any SPI
2688 * core or controller driver code.
2689 *
2690 * Note that although all messages to a spi_device are handled in
2691 * FIFO order, messages may go to different devices in other orders.
2692 * Some device might be higher priority, or have various "hard" access
2693 * time requirements, for example.
2694 *
2695 * On detection of any fault during the transfer, processing of
2696 * the entire message is aborted, and the device is deselected.
2697 * Until returning from the associated message completion callback,
2698 * no other spi_message queued to that device will be processed.
2699 * (This rule applies equally to all the synchronous transfer calls,
2700 * which are wrappers around this core asynchronous primitive.)
2701 *
2702 * Return: zero on success, else a negative error code.
2703 */
2704int spi_async(struct spi_device *spi, struct spi_message *message)
2705{
2706	struct spi_master *master = spi->master;
2707	int ret;
2708	unsigned long flags;
2709
2710	ret = __spi_validate(spi, message);
2711	if (ret != 0)
2712		return ret;
2713
2714	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2715
2716	if (master->bus_lock_flag)
2717		ret = -EBUSY;
2718	else
2719		ret = __spi_async(spi, message);
2720
2721	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2722
2723	return ret;
2724}
2725EXPORT_SYMBOL_GPL(spi_async);
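
/*
 * Usage sketch (illustrative only): an asynchronous submission must keep
 * the message, its transfer and the buffers alive until the completion
 * callback has run.  The foo_* names are hypothetical, and @ctx is assumed
 * to be zero-initialized (e.g. kzalloc'd) so that unset transfer fields
 * take the device defaults.
 *
 *	struct foo_async_ctx {
 *		struct spi_message msg;
 *		struct spi_transfer xfer;
 *		u8 cmd[4];
 *	};
 *
 *	static void foo_async_complete(void *context)
 *	{
 *		struct foo_async_ctx *ctx = context;
 *
 *		pr_debug("foo: message finished, status %d\n", ctx->msg.status);
 *	}
 *
 *	static int foo_kick_transfer(struct spi_device *spi,
 *				     struct foo_async_ctx *ctx)
 *	{
 *		spi_message_init(&ctx->msg);
 *		ctx->xfer.tx_buf = ctx->cmd;
 *		ctx->xfer.len = sizeof(ctx->cmd);
 *		spi_message_add_tail(&ctx->xfer, &ctx->msg);
 *		ctx->msg.complete = foo_async_complete;
 *		ctx->msg.context = ctx;
 *
 *		return spi_async(spi, &ctx->msg);
 *	}
 */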
2726
2727/**
2728 * spi_async_locked - version of spi_async with exclusive bus usage
2729 * @spi: device with which data will be exchanged
2730 * @message: describes the data transfers, including completion callback
2731 * Context: any (irqs may be blocked, etc)
2732 *
2733 * This call may be used in_irq and other contexts which can't sleep,
2734 * as well as from task contexts which can sleep.
2735 *
2736 * The completion callback is invoked in a context which can't sleep.
2737 * Before that invocation, the value of message->status is undefined.
2738 * When the callback is issued, message->status holds either zero (to
2739 * indicate complete success) or a negative error code.  After that
2740 * callback returns, the driver which issued the transfer request may
2741 * deallocate the associated memory; it's no longer in use by any SPI
2742 * core or controller driver code.
2743 *
2744 * Note that although all messages to a spi_device are handled in
2745 * FIFO order, messages may go to different devices in other orders.
2746 * Some device might be higher priority, or have various "hard" access
2747 * time requirements, for example.
2748 *
2749 * On detection of any fault during the transfer, processing of
2750 * the entire message is aborted, and the device is deselected.
2751 * Until returning from the associated message completion callback,
2752 * no other spi_message queued to that device will be processed.
2753 * (This rule applies equally to all the synchronous transfer calls,
2754 * which are wrappers around this core asynchronous primitive.)
2755 *
2756 * Return: zero on success, else a negative error code.
2757 */
2758int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2759{
2760	struct spi_master *master = spi->master;
2761	int ret;
2762	unsigned long flags;
2763
2764	ret = __spi_validate(spi, message);
2765	if (ret != 0)
2766		return ret;
2767
2768	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2769
2770	ret = __spi_async(spi, message);
2771
2772	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2773
2774	return ret;
2775
2776}
2777EXPORT_SYMBOL_GPL(spi_async_locked);
2778
2779
2780int spi_flash_read(struct spi_device *spi,
2781		   struct spi_flash_read_message *msg)
2782
2783{
2784	struct spi_master *master = spi->master;
2785	struct device *rx_dev = NULL;
2786	int ret;
2787
2788	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
2789	     msg->addr_nbits == SPI_NBITS_DUAL) &&
2790	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2791		return -EINVAL;
2792	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
2793	     msg->addr_nbits == SPI_NBITS_QUAD) &&
2794	    !(spi->mode & SPI_TX_QUAD))
2795		return -EINVAL;
2796	if (msg->data_nbits == SPI_NBITS_DUAL &&
2797	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2798		return -EINVAL;
2799	if (msg->data_nbits == SPI_NBITS_QUAD &&
2800	    !(spi->mode &  SPI_RX_QUAD))
2801		return -EINVAL;
2802
2803	if (master->auto_runtime_pm) {
2804		ret = pm_runtime_get_sync(master->dev.parent);
2805		if (ret < 0) {
2806			dev_err(&master->dev, "Failed to power device: %d\n",
2807				ret);
2808			return ret;
2809		}
2810	}
2811
2812	mutex_lock(&master->bus_lock_mutex);
2813	mutex_lock(&master->io_mutex);
2814	if (master->dma_rx) {
2815		rx_dev = master->dma_rx->device->dev;
2816		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
2817				  msg->buf, msg->len,
2818				  DMA_FROM_DEVICE);
2819		if (!ret)
2820			msg->cur_msg_mapped = true;
2821	}
2822	ret = master->spi_flash_read(spi, msg);
2823	if (msg->cur_msg_mapped)
2824		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
2825			      DMA_FROM_DEVICE);
2826	mutex_unlock(&master->io_mutex);
2827	mutex_unlock(&master->bus_lock_mutex);
2828
2829	if (master->auto_runtime_pm)
2830		pm_runtime_put(master->dev.parent);
2831
2832	return ret;
2833}
2834EXPORT_SYMBOL_GPL(spi_flash_read);
2835
2836/*-------------------------------------------------------------------------*/
2837
2838/* Utility methods for SPI master protocol drivers, layered on
2839 * top of the core.  Some other utility methods are defined as
2840 * inline functions.
2841 */
2842
2843static void spi_complete(void *arg)
2844{
2845	complete(arg);
2846}
2847
2848static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2849{
2850	DECLARE_COMPLETION_ONSTACK(done);
2851	int status;
2852	struct spi_master *master = spi->master;
2853	unsigned long flags;
2854
2855	status = __spi_validate(spi, message);
2856	if (status != 0)
2857		return status;
2858
2859	message->complete = spi_complete;
2860	message->context = &done;
2861	message->spi = spi;
2862
2863	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2864	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2865
2866	/* If we're not using the legacy transfer method then we will
2867	 * try to transfer in the calling context, so special-case that here.
2868	 * This code would be less tricky if we could remove the
2869	 * support for driver implemented message queues.
2870	 */
2871	if (master->transfer == spi_queued_transfer) {
2872		spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2873
2874		trace_spi_message_submit(message);
2875
2876		status = __spi_queued_transfer(spi, message, false);
2877
2878		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2879	} else {
2880		status = spi_async_locked(spi, message);
2881	}
2882
2883	if (status == 0) {
2884		/* Push out the messages in the calling context if we
2885		 * can.
2886		 */
2887		if (master->transfer == spi_queued_transfer) {
2888			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2889						       spi_sync_immediate);
2890			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2891						       spi_sync_immediate);
2892			__spi_pump_messages(master, false);
2893		}
2894
2895		wait_for_completion(&done);
2896		status = message->status;
2897	}
2898	message->context = NULL;
2899	return status;
2900}
2901
2902/**
2903 * spi_sync - blocking/synchronous SPI data transfers
2904 * @spi: device with which data will be exchanged
2905 * @message: describes the data transfers
2906 * Context: can sleep
2907 *
2908 * This call may only be used from a context that may sleep.  The sleep
2909 * is non-interruptible, and has no timeout.  Low-overhead controller
2910 * drivers may DMA directly into and out of the message buffers.
2911 *
2912 * Note that the SPI device's chip select is active during the message,
2913 * and then is normally disabled between messages.  Drivers for some
2914 * frequently-used devices may want to minimize costs of selecting a chip,
2915 * by leaving it selected in anticipation that the next message will go
2916 * to the same chip.  (That may increase power usage.)
2917 *
2918 * Also, the caller is guaranteeing that the memory associated with the
2919 * message will not be freed before this call returns.
2920 *
2921 * Return: zero on success, else a negative error code.
2922 */
2923int spi_sync(struct spi_device *spi, struct spi_message *message)
2924{
2925	int ret;
2926
2927	mutex_lock(&spi->master->bus_lock_mutex);
2928	ret = __spi_sync(spi, message);
2929	mutex_unlock(&spi->master->bus_lock_mutex);
2930
2931	return ret;
2932}
2933EXPORT_SYMBOL_GPL(spi_sync);
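
/*
 * Usage sketch (illustrative only): a simple synchronous exchange builds
 * the message and transfers on the stack; cmd and resp are assumed to be
 * DMA-safe buffers owned by the caller, and the foo_* name is
 * hypothetical.
 *
 *	static int foo_exchange(struct spi_device *spi,
 *				const u8 *cmd, size_t cmd_len,
 *				u8 *resp, size_t resp_len)
 *	{
 *		struct spi_transfer t[] = {
 *			{ .tx_buf = cmd,  .len = cmd_len  },
 *			{ .rx_buf = resp, .len = resp_len },
 *		};
 *		struct spi_message m;
 *
 *		spi_message_init(&m);
 *		spi_message_add_tail(&t[0], &m);
 *		spi_message_add_tail(&t[1], &m);
 *		return spi_sync(spi, &m);
 *	}
 */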
2934
2935/**
2936 * spi_sync_locked - version of spi_sync with exclusive bus usage
2937 * @spi: device with which data will be exchanged
2938 * @message: describes the data transfers
2939 * Context: can sleep
2940 *
2941 * This call may only be used from a context that may sleep.  The sleep
2942 * is non-interruptible, and has no timeout.  Low-overhead controller
2943 * drivers may DMA directly into and out of the message buffers.
2944 *
2945 * This call should be used by drivers that require exclusive access to the
2946 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2947 * be released by a spi_bus_unlock call when the exclusive access is over.
2948 *
2949 * Return: zero on success, else a negative error code.
2950 */
2951int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2952{
2953	return __spi_sync(spi, message);
2954}
2955EXPORT_SYMBOL_GPL(spi_sync_locked);
2956
2957/**
2958 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2959 * @master: SPI bus master that should be locked for exclusive bus access
2960 * Context: can sleep
2961 *
2962 * This call may only be used from a context that may sleep.  The sleep
2963 * is non-interruptible, and has no timeout.
2964 *
2965 * This call should be used by drivers that require exclusive access to the
2966 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2967 * exclusive access is over. Data transfer must be done by spi_sync_locked
2968 * and spi_async_locked calls when the SPI bus lock is held.
2969 *
2970 * Return: always zero.
2971 */
2972int spi_bus_lock(struct spi_master *master)
2973{
2974	unsigned long flags;
2975
2976	mutex_lock(&master->bus_lock_mutex);
2977
2978	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2979	master->bus_lock_flag = 1;
2980	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2981
2982	/* mutex remains locked until spi_bus_unlock is called */
2983
2984	return 0;
2985}
2986EXPORT_SYMBOL_GPL(spi_bus_lock);
2987
2988/**
2989 * spi_bus_unlock - release the lock for exclusive SPI bus usage
2990 * @master: SPI bus master that was locked for exclusive bus access
2991 * Context: can sleep
2992 *
2993 * This call may only be used from a context that may sleep.  The sleep
2994 * is non-interruptible, and has no timeout.
2995 *
2996 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2997 * call.
2998 *
2999 * Return: always zero.
3000 */
3001int spi_bus_unlock(struct spi_master *master)
3002{
3003	master->bus_lock_flag = 0;
3004
3005	mutex_unlock(&master->bus_lock_mutex);
3006
3007	return 0;
3008}
3009EXPORT_SYMBOL_GPL(spi_bus_unlock);
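
/*
 * Usage sketch (illustrative only): a driver that must keep other clients
 * off the bus between two messages, e.g. for an atomic read-modify-write
 * of a chip register, brackets the locked transfers like this.  read_msg
 * and write_msg are hypothetical, previously initialized messages.
 *
 *	spi_bus_lock(spi->master);
 *
 *	ret = spi_sync_locked(spi, &read_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &write_msg);
 *
 *	spi_bus_unlock(spi->master);
 */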
3010
3011/* portable code must never pass more than 32 bytes */
3012#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
3013
3014static u8	*buf;
3015
3016/**
3017 * spi_write_then_read - SPI synchronous write followed by read
3018 * @spi: device with which data will be exchanged
3019 * @txbuf: data to be written (need not be dma-safe)
3020 * @n_tx: size of txbuf, in bytes
3021 * @rxbuf: buffer into which data will be read (need not be dma-safe)
3022 * @n_rx: size of rxbuf, in bytes
3023 * Context: can sleep
3024 *
3025 * This performs a half duplex MicroWire style transaction with the
3026 * device, sending txbuf and then reading rxbuf.  The return value
3027 * is zero for success, else a negative errno status code.
3028 * This call may only be used from a context that may sleep.
3029 *
3030 * Parameters to this routine are always copied using a small buffer;
3031 * portable code should never use this for more than 32 bytes.
3032 * Performance-sensitive or bulk transfer code should instead use
3033 * spi_{async,sync}() calls with dma-safe buffers.
3034 *
3035 * Return: zero on success, else a negative error code.
3036 */
3037int spi_write_then_read(struct spi_device *spi,
3038		const void *txbuf, unsigned n_tx,
3039		void *rxbuf, unsigned n_rx)
3040{
3041	static DEFINE_MUTEX(lock);
3042
3043	int			status;
3044	struct spi_message	message;
3045	struct spi_transfer	x[2];
3046	u8			*local_buf;
3047
3048	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
3049	 * copying here (as a pure convenience thing), but we can
3050	 * keep heap costs out of the hot path unless someone else is
3051	 * using the pre-allocated buffer or the transfer is too large.
3052	 */
3053	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3054		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3055				    GFP_KERNEL | GFP_DMA);
3056		if (!local_buf)
3057			return -ENOMEM;
3058	} else {
3059		local_buf = buf;
3060	}
3061
3062	spi_message_init(&message);
3063	memset(x, 0, sizeof(x));
3064	if (n_tx) {
3065		x[0].len = n_tx;
3066		spi_message_add_tail(&x[0], &message);
3067	}
3068	if (n_rx) {
3069		x[1].len = n_rx;
3070		spi_message_add_tail(&x[1], &message);
3071	}
3072
3073	memcpy(local_buf, txbuf, n_tx);
3074	x[0].tx_buf = local_buf;
3075	x[1].rx_buf = local_buf + n_tx;
3076
3077	/* do the i/o */
3078	status = spi_sync(spi, &message);
3079	if (status == 0)
3080		memcpy(rxbuf, x[1].rx_buf, n_rx);
3081
3082	if (x[0].tx_buf == buf)
3083		mutex_unlock(&lock);
3084	else
3085		kfree(local_buf);
3086
3087	return status;
3088}
3089EXPORT_SYMBOL_GPL(spi_write_then_read);
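
/*
 * Usage sketch (illustrative only): reading one register of a chip whose
 * protocol is "send a one byte command, then read a one byte reply"; the
 * FOO_REG_ID command value is hypothetical.
 *
 *	u8 cmd = FOO_REG_ID;
 *	u8 id;
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &id, 1);
 *	if (!ret)
 *		dev_info(&spi->dev, "chip id %#x\n", id);
 */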
3090
3091/*-------------------------------------------------------------------------*/
3092
3093#if IS_ENABLED(CONFIG_OF_DYNAMIC)
3094static int __spi_of_device_match(struct device *dev, void *data)
3095{
3096	return dev->of_node == data;
3097}
3098
3099/* must call put_device() when done with the returned spi_device */
3100static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3101{
3102	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3103						__spi_of_device_match);
3104	return dev ? to_spi_device(dev) : NULL;
3105}
3106
3107static int __spi_of_master_match(struct device *dev, const void *data)
3108{
3109	return dev->of_node == data;
3110}
3111
3112/* the spi masters are not using spi_bus, so we must find them another way */
3113static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
3114{
3115	struct device *dev;
3116
3117	dev = class_find_device(&spi_master_class, NULL, node,
3118				__spi_of_master_match);
3119	if (!dev)
3120		return NULL;
3121
3122	/* reference got in class_find_device */
3123	return container_of(dev, struct spi_master, dev);
3124}
3125
3126static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3127			 void *arg)
3128{
3129	struct of_reconfig_data *rd = arg;
3130	struct spi_master *master;
3131	struct spi_device *spi;
3132
3133	switch (of_reconfig_get_state_change(action, arg)) {
3134	case OF_RECONFIG_CHANGE_ADD:
3135		master = of_find_spi_master_by_node(rd->dn->parent);
3136		if (master == NULL)
3137			return NOTIFY_OK;	/* not for us */
3138
3139		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3140			put_device(&master->dev);
3141			return NOTIFY_OK;
3142		}
3143
3144		spi = of_register_spi_device(master, rd->dn);
3145		put_device(&master->dev);
3146
3147		if (IS_ERR(spi)) {
3148			pr_err("%s: failed to create for '%s'\n",
3149					__func__, rd->dn->full_name);
3150			of_node_clear_flag(rd->dn, OF_POPULATED);
3151			return notifier_from_errno(PTR_ERR(spi));
3152		}
3153		break;
3154
3155	case OF_RECONFIG_CHANGE_REMOVE:
3156		/* already depopulated? */
3157		if (!of_node_check_flag(rd->dn, OF_POPULATED))
3158			return NOTIFY_OK;
3159
3160		/* find our device by node */
3161		spi = of_find_spi_device_by_node(rd->dn);
3162		if (spi == NULL)
3163			return NOTIFY_OK;	/* no? not meant for us */
3164
3165		/* unregister takes one ref away */
3166		spi_unregister_device(spi);
3167
3168		/* and put the reference of the find */
3169		put_device(&spi->dev);
3170		break;
3171	}
3172
3173	return NOTIFY_OK;
3174}
3175
3176static struct notifier_block spi_of_notifier = {
3177	.notifier_call = of_spi_notify,
3178};
3179#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3180extern struct notifier_block spi_of_notifier;
3181#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3182
3183#if IS_ENABLED(CONFIG_ACPI)
3184static int spi_acpi_master_match(struct device *dev, const void *data)
3185{
3186	return ACPI_COMPANION(dev->parent) == data;
3187}
3188
3189static int spi_acpi_device_match(struct device *dev, void *data)
3190{
3191	return ACPI_COMPANION(dev) == data;
3192}
3193
3194static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
3195{
3196	struct device *dev;
3197
3198	dev = class_find_device(&spi_master_class, NULL, adev,
3199				spi_acpi_master_match);
3200	if (!dev)
3201		return NULL;
3202
3203	return container_of(dev, struct spi_master, dev);
3204}
3205
3206static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3207{
3208	struct device *dev;
3209
3210	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3211
3212	return dev ? to_spi_device(dev) : NULL;
3213}
3214
3215static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3216			   void *arg)
3217{
3218	struct acpi_device *adev = arg;
3219	struct spi_master *master;
3220	struct spi_device *spi;
3221
3222	switch (value) {
3223	case ACPI_RECONFIG_DEVICE_ADD:
3224		master = acpi_spi_find_master_by_adev(adev->parent);
3225		if (!master)
3226			break;
3227
3228		acpi_register_spi_device(master, adev);
3229		put_device(&master->dev);
3230		break;
3231	case ACPI_RECONFIG_DEVICE_REMOVE:
3232		if (!acpi_device_enumerated(adev))
3233			break;
3234
3235		spi = acpi_spi_find_device_by_adev(adev);
3236		if (!spi)
3237			break;
3238
3239		spi_unregister_device(spi);
3240		put_device(&spi->dev);
3241		break;
3242	}
3243
3244	return NOTIFY_OK;
3245}
3246
3247static struct notifier_block spi_acpi_notifier = {
3248	.notifier_call = acpi_spi_notify,
3249};
3250#else
3251extern struct notifier_block spi_acpi_notifier;
3252#endif
3253
3254static int __init spi_init(void)
3255{
3256	int	status;
3257
3258	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3259	if (!buf) {
3260		status = -ENOMEM;
3261		goto err0;
3262	}
3263
3264	status = bus_register(&spi_bus_type);
3265	if (status < 0)
3266		goto err1;
3267
3268	status = class_register(&spi_master_class);
3269	if (status < 0)
3270		goto err2;
3271
3272	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3273		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3274	if (IS_ENABLED(CONFIG_ACPI))
3275		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3276
3277	return 0;
3278
3279err2:
3280	bus_unregister(&spi_bus_type);
3281err1:
3282	kfree(buf);
3283	buf = NULL;
3284err0:
3285	return status;
3286}
3287
3288/* board_info is normally registered in arch_initcall(),
3289 * but even essential drivers wait till later.
3290 *
3291 * REVISIT only boardinfo really needs static linking. The rest (device and
3292 * driver registration) _could_ be dynamically linked (modular) ... costs
3293 * include needing to have boardinfo data structures be much more public.
3294 */
3295postcore_initcall(spi_init);
3296