// SPDX-License-Identifier: GPL-2.0-only
/*
 * Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#include <linux/of.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_device related */
struct dw_spi_chip_data {
	u32 cr0;
	u32 rx_sample_dly;	/* RX sample delay */
};

#ifdef CONFIG_DEBUG_FS

#define DW_SPI_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off,			\
}

static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
	DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
	DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
	DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
	DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
	DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
	DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
	DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
	DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
	DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
	DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
	DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
	DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
	DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
	DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
	DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
	DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};

static void dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->host->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);

	dws->regset.regs = dw_spi_dbgfs_regs;
	dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
	dws->regset.base = dws->regs;
	debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline void dw_spi_debugfs_init(struct dw_spi *dws)
{
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	bool cs_high = !!(spi->mode & SPI_CS_HIGH);

	/*
	 * The DW SPI controller demands a native CS to be set in order to
	 * proceed with the data transfer. So in order to activate the SPI
	 * communications we must set the corresponding bit in the Slave
	 * Enable register no matter whether the SPI core is configured to
	 * support an active-high or active-low CS level.
	 */
	if (cs_high == enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi_get_chipselect(spi, 0)));
	else
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_set_cs, SPI_DW_CORE);

/* Return the max entries we can fill into tx fifo */
static inline u32 dw_spi_tx_max(struct dw_spi *dws)
{
	u32 tx_room, rxtx_gap;

	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the Tx/Rx mismatch: we considered using
	 * (dws->fifo_len - rxflr - txflr) as the maximum for Tx, but it
	 * doesn't cover the data which is out of the Tx/Rx FIFOs and
	 * inside the shift registers. So the limit is enforced from the
	 * SW point of view instead: Tx is never allowed to run more than
	 * fifo_len frames ahead of Rx.
	 */
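	/*
	 * For instance, with fifo_len == 16 and 16 frames already in flight
	 * (dws->rx_len - dws->tx_len == 16) the gap below is 0, so Tx is
	 * paused until some of the pending data is read out of the Rx FIFO.
	 */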
	rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);

	return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}

/* Return the max entries we should read out of rx fifo */
static inline u32 dw_spi_rx_max(struct dw_spi *dws)
{
	return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}

static void dw_writer(struct dw_spi *dws)
{
	u32 max = dw_spi_tx_max(dws);
	u32 txw = 0;

	while (max--) {
		if (dws->tx) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else if (dws->n_bytes == 2)
				txw = *(u16 *)(dws->tx);
			else
				txw = *(u32 *)(dws->tx);

			dws->tx += dws->n_bytes;
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		--dws->tx_len;
	}
}

static void dw_reader(struct dw_spi *dws)
{
	u32 max = dw_spi_rx_max(dws);
	u32 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		if (dws->rx) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else if (dws->n_bytes == 2)
				*(u16 *)(dws->rx) = rxw;
			else
				*(u32 *)(dws->rx) = rxw;

			dws->rx += dws->n_bytes;
		}
		--dws->rx_len;
	}
}

int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
	u32 irq_status;
	int ret = 0;

	if (raw)
		irq_status = dw_readl(dws, DW_SPI_RISR);
	else
		irq_status = dw_readl(dws, DW_SPI_ISR);

	if (irq_status & DW_SPI_INT_RXOI) {
		dev_err(&dws->host->dev, "RX FIFO overflow detected\n");
		ret = -EIO;
	}

	if (irq_status & DW_SPI_INT_RXUI) {
		dev_err(&dws->host->dev, "RX FIFO underflow detected\n");
		ret = -EIO;
	}

	if (irq_status & DW_SPI_INT_TXOI) {
		dev_err(&dws->host->dev, "TX FIFO overflow detected\n");
		ret = -EIO;
	}

	/* Generically handle the erroneous situation */
	if (ret) {
		dw_spi_reset_chip(dws);
		if (dws->host->cur_msg)
			dws->host->cur_msg->status = ret;
	}

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_check_status, SPI_DW_CORE);

static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (dw_spi_check_status(dws, false)) {
		spi_finalize_current_transfer(dws->host);
		return IRQ_HANDLED;
	}

	/*
	 * Read data from the Rx FIFO every time this handler gets a chance
	 * to run. If there is nothing left to receive, terminate the
	 * procedure. Otherwise adjust the Rx FIFO Threshold level if it's
	 * the final stage of the transfer. By doing so we'll get the next
	 * IRQ right when the leftover incoming data is received.
	 */
	dw_reader(dws);
	if (!dws->rx_len) {
		dw_spi_mask_intr(dws, 0xff);
		spi_finalize_current_transfer(dws->host);
	} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
		dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
	}

	/*
	 * Send data out if the Tx FIFO Empty IRQ is received. The IRQ will
	 * be disabled after the data transmission is finished so as not to
	 * have the TXE IRQ flood at the final stage of the transfer.
	 */
	if (irq_status & DW_SPI_INT_TXEI) {
		dw_writer(dws);
		if (!dws->tx_len)
			dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(host);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & DW_SPI_INT_MASK;

	if (!irq_status)
		return IRQ_NONE;

	if (!host->cur_msg) {
		dw_spi_mask_intr(dws, 0xff);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
	u32 cr0 = 0;

	if (dw_spi_ip_is(dws, PSSI)) {
		/* CTRLR0[ 5: 4] Frame Format */
		cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 6] Serial Clock Phase
		 * CTRLR0[ 7] Serial Clock Polarity
		 */
		if (spi->mode & SPI_CPOL)
			cr0 |= DW_PSSI_CTRLR0_SCPOL;
		if (spi->mode & SPI_CPHA)
			cr0 |= DW_PSSI_CTRLR0_SCPHA;

		/* CTRLR0[11] Shift Register Loop */
		if (spi->mode & SPI_LOOP)
			cr0 |= DW_PSSI_CTRLR0_SRL;
	} else {
		/* CTRLR0[ 7: 6] Frame Format */
		cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 8] Serial Clock Phase
		 * CTRLR0[ 9] Serial Clock Polarity
		 */
		if (spi->mode & SPI_CPOL)
			cr0 |= DW_HSSI_CTRLR0_SCPOL;
		if (spi->mode & SPI_CPHA)
			cr0 |= DW_HSSI_CTRLR0_SCPHA;

		/* CTRLR0[13] Shift Register Loop */
		if (spi->mode & SPI_LOOP)
			cr0 |= DW_HSSI_CTRLR0_SRL;

		/* CTRLR0[31] MST */
		if (dw_spi_ver_is_ge(dws, HSSI, 102A))
			cr0 |= DW_HSSI_CTRLR0_MST;
	}

	return cr0;
}

void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
			  struct dw_spi_cfg *cfg)
{
	struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
	u32 cr0 = chip->cr0;
	u32 speed_hz;
	u16 clk_div;

	/* CTRLR0[ 4/3: 0] or CTRLR0[ 20: 16] Data Frame Size */
	cr0 |= (cfg->dfs - 1) << dws->dfs_offset;

	if (dw_spi_ip_is(dws, PSSI))
		/* CTRLR0[ 9:8] Transfer Mode */
		cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_TMOD_MASK, cfg->tmode);
	else
		/* CTRLR0[11:10] Transfer Mode */
		cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_TMOD_MASK, cfg->tmode);

	dw_writel(dws, DW_SPI_CTRLR0, cr0);

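	/*
	 * CTRLR1.NDF holds the number of data frames to be received minus
	 * one, so it's only relevant for the receive-only and EEPROM-read
	 * modes.
	 */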
	if (cfg->tmode == DW_SPI_CTRLR0_TMOD_EPROMREAD ||
	    cfg->tmode == DW_SPI_CTRLR0_TMOD_RO)
		dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);

	/* Note DW APB SSI clock divider doesn't support odd numbers */
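	/*
	 * For example, with max_freq = 100 MHz and cfg->freq = 24 MHz:
	 * DIV_ROUND_UP() yields 5, which is rounded up to the even divider
	 * 6, giving an effective rate of 100 MHz / 6 = ~16.7 MHz.
	 */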
	clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
	speed_hz = dws->max_freq / clk_div;

	if (dws->current_freq != speed_hz) {
		dw_spi_set_clk(dws, clk_div);
		dws->current_freq = speed_hz;
	}

	/* Update RX sample delay if required */
	if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
		dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
		dws->cur_rx_sample_dly = chip->rx_sample_dly;
	}
}
EXPORT_SYMBOL_NS_GPL(dw_spi_update_config, SPI_DW_CORE);

static void dw_spi_irq_setup(struct dw_spi *dws)
{
	u16 level;
	u8 imask;

	/*
	 * Originally the Tx and Rx data lengths match. The Rx FIFO Threshold
	 * level will be adjusted at the final stage of the IRQ-based SPI
	 * transfer execution so as not to lose the leftover of the incoming
	 * data.
	 */
	level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
	dw_writel(dws, DW_SPI_TXFTLR, level);
	dw_writel(dws, DW_SPI_RXFTLR, level - 1);

	dws->transfer_handler = dw_spi_transfer_handler;

	imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI |
		DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
	dw_spi_umask_intr(dws, imask);
}

/*
 * The iterative procedure of the poll-based transfer is simple: write as
 * much as possible to the Tx FIFO, wait until the pending incoming data is
 * ready to be read, read it from the Rx FIFO and check whether the performed
 * procedure has been successful.
 *
 * Note this method, the same way as the IRQ-based transfer, won't work well
 * for the SPI devices connected to the controller with a native CS, due to
 * the automatic CS assertion/de-assertion.
 */
static int dw_spi_poll_transfer(struct dw_spi *dws,
				struct spi_transfer *transfer)
{
	struct spi_delay delay;
	u16 nbits;
	int ret;

	delay.unit = SPI_DELAY_UNIT_SCK;
	nbits = dws->n_bytes * BITS_PER_BYTE;

	do {
		dw_writer(dws);

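		/*
		 * dws->rx_len - dws->tx_len is the number of frames pushed
		 * out but not yet read back, so wait that many frame
		 * periods (nbits SCLK cycles each) for them to arrive.
		 */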
		delay.value = nbits * (dws->rx_len - dws->tx_len);
		spi_delay_exec(&delay, transfer);

		dw_reader(dws);

		ret = dw_spi_check_status(dws, true);
		if (ret)
			return ret;
	} while (dws->rx_len);

	return 0;
}

static int dw_spi_transfer_one(struct spi_controller *host,
			       struct spi_device *spi,
			       struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(host);
	struct dw_spi_cfg cfg = {
		.tmode = DW_SPI_CTRLR0_TMOD_TR,
		.dfs = transfer->bits_per_word,
		.freq = transfer->speed_hz,
	};
	int ret;

	dws->dma_mapped = 0;
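	/*
	 * Transfer granularity is the rounded-up power-of-two byte size of
	 * a frame, e.g. bits_per_word = 20 takes 3 bytes, which is rounded
	 * up to a 4-byte (u32) FIFO access.
	 */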
	dws->n_bytes =
		roundup_pow_of_two(DIV_ROUND_UP(transfer->bits_per_word,
						BITS_PER_BYTE));

	dws->tx = (void *)transfer->tx_buf;
	dws->tx_len = transfer->len / dws->n_bytes;
	dws->rx = transfer->rx_buf;
	dws->rx_len = dws->tx_len;

	/* Ensure the data above is visible for all CPUs */
	smp_mb();

	dw_spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, spi, &cfg);

	transfer->effective_speed_hz = dws->current_freq;

	/* Check if current transfer is a DMA transaction */
	if (host->can_dma && host->can_dma(host, spi, transfer))
		dws->dma_mapped = host->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	dw_spi_mask_intr(dws, 0xff);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret)
			return ret;
	}

	dw_spi_enable_chip(dws, 1);

	if (dws->dma_mapped)
		return dws->dma_ops->dma_transfer(dws, transfer);
	else if (dws->irq == IRQ_NOTCONNECTED)
		return dw_spi_poll_transfer(dws, transfer);

	dw_spi_irq_setup(dws);

	return 1;
}

static void dw_spi_handle_err(struct spi_controller *host,
			      struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(host);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	dw_spi_reset_chip(dws);
}

static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_IN)
		op->data.nbytes = clamp_val(op->data.nbytes, 0, DW_SPI_NDF_MASK + 1);

	return 0;
}

static bool dw_spi_supports_mem_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
	    op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
{
	unsigned int i, j, len;
	u8 *out;

	/*
	 * Calculate the total length of the EEPROM command transfer and
	 * either use the pre-allocated buffer or create a temporary one.
	 */
	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		len += op->data.nbytes;

	if (len <= DW_SPI_BUF_SIZE) {
		out = dws->buf;
	} else {
		out = kzalloc(len, GFP_KERNEL);
		if (!out)
			return -ENOMEM;
	}

	/*
	 * Collect the operation code, address and dummy bytes into a single
	 * buffer. If it's a transfer with data to be sent, copy that in as
	 * well in order to speed the data transmission up.
	 */
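	/*
	 * E.g. a one-byte 0x03 opcode with the three-byte address 0x012345
	 * produces out[] = { 0x03, 0x01, 0x23, 0x45 }, followed by zeroed
	 * dummy bytes if any are requested.
	 */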
	for (i = 0; i < op->cmd.nbytes; ++i)
		out[i] = DW_SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
	for (j = 0; j < op->addr.nbytes; ++i, ++j)
		out[i] = DW_SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
	for (j = 0; j < op->dummy.nbytes; ++i, ++j)
		out[i] = 0x0;

	if (op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(&out[i], op->data.buf.out, op->data.nbytes);

	dws->n_bytes = 1;
	dws->tx = out;
	dws->tx_len = len;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dws->rx = op->data.buf.in;
		dws->rx_len = op->data.nbytes;
	} else {
		dws->rx = NULL;
		dws->rx_len = 0;
	}

	return 0;
}

static void dw_spi_free_mem_buf(struct dw_spi *dws)
{
	if (dws->tx != dws->buf)
		kfree(dws->tx);
}

static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
{
	u32 room, entries, sts;
	unsigned int len;
	u8 *buf;

	/*
	 * At the initial stage we just pre-fill the Tx FIFO with no rush,
	 * since the native CS hasn't been enabled yet and the automatic data
	 * transmission won't start until we do that.
	 */
	len = min(dws->fifo_len, dws->tx_len);
	buf = dws->tx;
	while (len--)
		dw_write_io_reg(dws, DW_SPI_DR, *buf++);

	/*
	 * After setting any bit in the SER register the transmission will
	 * start automatically. We have to keep up with that procedure,
	 * otherwise the CS will be de-asserted and the memory operation
	 * prematurely terminated.
	 */
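	/* Bytes still to be sent: the total Tx length minus the pre-filled part */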
	len = dws->tx_len - ((void *)buf - dws->tx);
	dw_spi_set_cs(spi, false);
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
		if (!entries) {
			dev_err(&dws->host->dev, "CS de-assertion on Tx\n");
			return -EIO;
		}
		room = min(dws->fifo_len - entries, len);
		for (; room; --room, --len)
			dw_write_io_reg(dws, DW_SPI_DR, *buf++);
	}

	/*
	 * Data fetching will start automatically if the EEPROM-read mode is
	 * activated. We have to keep up with the incoming data pace to
	 * prevent an Rx FIFO overflow from causing inbound data loss.
	 */
	len = dws->rx_len;
	buf = dws->rx;
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
		if (!entries) {
			sts = readl_relaxed(dws->regs + DW_SPI_RISR);
			if (sts & DW_SPI_INT_RXOI) {
				dev_err(&dws->host->dev, "FIFO overflow on Rx\n");
				return -EIO;
			}
			continue;
		}
		entries = min(entries, len);
		for (; entries; --entries, --len)
			*buf++ = dw_read_io_reg(dws, DW_SPI_DR);
	}

	return 0;
}

static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
	return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
}

static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

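	/*
	 * Estimate the drain time of the data left in the Tx FIFO: each
	 * entry takes dws->n_bytes * 8 SCLK periods to be shifted out.
	 */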
	nents = dw_readl(dws, DW_SPI_TXFLR);
	ns = NSEC_PER_SEC / dws->current_freq * nents;
	ns *= dws->n_bytes * BITS_PER_BYTE;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_ctlr_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->host->dev, "Mem op hung up\n");
		return -EIO;
	}

	return 0;
}

static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
	dw_spi_enable_chip(dws, 0);
	dw_spi_set_cs(spi, true);
	dw_spi_enable_chip(dws, 1);
}

/*
 * The SPI memory operation implementation below is the best choice for the
 * devices, which are selected by the native chip-select lane. It's
 * specifically developed to work around the problem with the automatic
 * chip-select lane toggle when there is no data in the Tx FIFO buffer.
 * Luckily the current SPI-mem core calls the exec_op() callback only if the
 * GPIO-based CS is unavailable.
 */
static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
	struct dw_spi_cfg cfg;
	unsigned long flags;
	int ret;

	/*
	 * Collect the outbound data into a single buffer to speed the
	 * transmission up at least at the initial stage.
	 */
	ret = dw_spi_init_mem_buf(dws, op);
	if (ret)
		return ret;

	/*
	 * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
	 * operation. Transmit-only mode is suitable for the rest of them.
	 */
	cfg.dfs = 8;
	cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
	if (op->data.dir == SPI_MEM_DATA_IN) {
		cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
		cfg.ndf = op->data.nbytes;
	} else {
		cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
	}

	dw_spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, mem->spi, &cfg);

	dw_spi_mask_intr(dws, 0xff);

	dw_spi_enable_chip(dws, 1);

	/*
	 * The DW APB SSI controller has very nasty peculiarities. First,
	 * originally (without any vendor-specific modifications) it doesn't
	 * provide a direct way to set and clear the native chip-select
	 * signal. Instead the controller asserts the CS lane if the Tx FIFO
	 * isn't empty and a transmission is going on, and automatically
	 * de-asserts it back to the high level if the Tx FIFO doesn't have
	 * anything to be pushed out. Due to that, multi-tasking or heavy IRQ
	 * activity might be fatal, since a preemption of the transfer
	 * procedure may cause the Tx FIFO to get empty and the CS to be
	 * suddenly de-asserted, which in the middle of the transfer will
	 * most likely cause data loss. Secondly, the EEPROM-read and
	 * Read-only DW SPI transfer modes imply the incoming data being
	 * automatically pulled into the Rx FIFO. So if the driver software
	 * is late in fetching the data from the FIFO before it's overflown,
	 * new incoming data will be lost. In order to make sure the executed
	 * memory operations are CS-atomic and to prevent the Rx FIFO
	 * overflow we have to disable the local interrupts so as to block
	 * any preemption during the subsequent IO operations.
	 *
	 * Note. In some circumstances disabling IRQs may not help to prevent
	 * the problems described above. The CS de-assertion and Rx FIFO
	 * overflow may still happen due to a relatively slow system bus or
	 * a CPU not working fast enough, so that the write-then-read
	 * algorithm implemented here just won't keep up with the SPI bus
	 * data transfer. Such a situation is highly platform-specific and
	 * is supposed to be fixed by manually restricting the SPI bus
	 * frequency using the dws->max_mem_freq parameter.
	 */
	local_irq_save(flags);
	preempt_disable();

	ret = dw_spi_write_then_read(dws, mem->spi);

	local_irq_restore(flags);
	preempt_enable();

	/*
	 * Wait for the operation to finish and check the controller status,
	 * but only if no run-time error has been detected so far. If one
	 * has, waiting is pointless, and checking the status would just
	 * print an additional error message, since any HW error flag being
	 * set would be due to the error already detected on the data
	 * transfer.
	 */
	if (!ret) {
		ret = dw_spi_wait_mem_op_done(dws);
		if (!ret)
			ret = dw_spi_check_status(dws, true);
	}

	dw_spi_stop_mem_op(dws, mem->spi);

	dw_spi_free_mem_buf(dws);

	return ret;
}

/*
 * Initialize the default memory operations if a glue layer hasn't specified
 * custom ones. Direct mapping operations will be preserved anyway since the
 * DW SPI controller doesn't have an embedded dirmap interface. Note the
 * memory operations implemented in this driver are the best choice only for
 * the DW APB SSI controller with the standard native CS functionality. If a
 * hardware vendor has fixed the automatic CS assertion/de-assertion
 * peculiarity, then it will be safer to use the normal SPI-messages-based
 * transfers implementation.
 */
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
	if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
	    !dws->set_cs) {
		dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
		dws->mem_ops.supports_op = dw_spi_supports_mem_op;
		dws->mem_ops.exec_op = dw_spi_exec_mem_op;
		if (!dws->max_mem_freq)
			dws->max_mem_freq = dws->max_freq;
	}
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct dw_spi_chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		u32 rx_sample_dly_ns;

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
		/* Get specific / default rx-sample-delay */
		if (device_property_read_u32(&spi->dev,
					     "rx-sample-delay-ns",
					     &rx_sample_dly_ns) != 0)
			/* Use default controller value */
			rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
		chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
							NSEC_PER_SEC /
							dws->max_freq);
	}

	/*
	 * Update CR0 data each time the setup callback is invoked since
	 * the device parameters could have been changed, for instance, by
	 * the MMC SPI driver or something else.
	 */
	chip->cr0 = dw_spi_prepare_cr0(dws, spi);

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct dw_spi_chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	dw_spi_reset_chip(dws);

	/*
	 * Retrieve the Synopsys component version if it hasn't been specified
	 * by the platform. The CoreKit version ID is encoded as a 3-character
	 * ASCII code enclosed in '*' (typical for most Synopsys IP cores).
	 */
	if (!dws->ver) {
		dws->ver = dw_readl(dws, DW_SPI_VERSION);

		dev_dbg(dev, "Synopsys DWC%sSSI v%c.%c%c\n",
			dw_spi_ip_is(dws, PSSI) ? " APB " : " ",
			DW_SPI_GET_BYTE(dws->ver, 3), DW_SPI_GET_BYTE(dws->ver, 2),
			DW_SPI_GET_BYTE(dws->ver, 1));
	}

	/*
	 * Try to detect the FIFO depth if it hasn't been set by the interface
	 * driver; per the HW spec the depth can range from 2 to 256.
	 */
	if (!dws->fifo_len) {
		u32 fifo;

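		/*
		 * TXFTLR can only hold values up to fifo_len - 1, so the
		 * first threshold that fails to read back equals the depth.
		 */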
		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFTLR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFTLR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/*
	 * Detect the CTRLR0.DFS field size and offset by testing whether the
	 * lowest bits are writable. Note the DWC SSI controller also has the
	 * extended DFS, but with a zero offset.
	 */
	if (dw_spi_ip_is(dws, PSSI)) {
		u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);

		dw_spi_enable_chip(dws, 0);
		dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff);
		cr0 = dw_readl(dws, DW_SPI_CTRLR0);
		dw_writel(dws, DW_SPI_CTRLR0, tmp);
		dw_spi_enable_chip(dws, 1);

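		/*
		 * If the all-ones write left the legacy 4-bit DFS field
		 * zeroed, the core has been synthesized with the 32-bit max
		 * data frame size support, with the field moved to DFS_32.
		 */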
		if (!(cr0 & DW_PSSI_CTRLR0_DFS_MASK)) {
			dws->caps |= DW_SPI_CAP_DFS32;
			dws->dfs_offset = __bf_shf(DW_PSSI_CTRLR0_DFS32_MASK);
			dev_dbg(dev, "Detected 32-bits max data frame size\n");
		}
	} else {
		dws->caps |= DW_SPI_CAP_DFS32;
	}

	/* Enable the HW fixup for the explicit CS deselect on Amazon's Alpine chip */
	if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}

int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *host;
	int ret;

	if (!dws)
		return -EINVAL;

	host = spi_alloc_host(dev, 0);
	if (!host)
		return -ENOMEM;

	device_set_node(&host->dev, dev_fwnode(dev));

	dws->host = host;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	spi_controller_set_devdata(host, dws);

	/* Basic HW init */
	dw_spi_hw_init(dev, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  host);
	if (ret < 0 && ret != -ENOTCONN) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_host;
	}

	dw_spi_init_mem_ops(dws);

	host->use_gpio_descriptors = true;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	if (dws->caps & DW_SPI_CAP_DFS32)
		host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	host->bus_num = dws->bus_num;
	host->num_chipselect = dws->num_cs;
	host->setup = dw_spi_setup;
	host->cleanup = dw_spi_cleanup;
	if (dws->set_cs)
		host->set_cs = dws->set_cs;
	else
		host->set_cs = dw_spi_set_cs;
	host->transfer_one = dw_spi_transfer_one;
	host->handle_err = dw_spi_handle_err;
	if (dws->mem_ops.exec_op)
		host->mem_ops = &dws->mem_ops;
	host->max_speed_hz = dws->max_freq;
	host->flags = SPI_CONTROLLER_GPIO_SS;
	host->auto_runtime_pm = true;

	/* Get default rx sample delay */
	device_property_read_u32(dev, "rx-sample-delay-ns",
				 &dws->def_rx_sample_dly_ns);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dev, dws);
		if (ret == -EPROBE_DEFER) {
			goto err_free_irq;
		} else if (ret) {
			dev_warn(dev, "DMA init failed\n");
		} else {
			host->can_dma = dws->dma_ops->can_dma;
			host->flags |= SPI_CONTROLLER_MUST_TX;
		}
	}

	ret = spi_register_controller(host);
	if (ret) {
		dev_err_probe(dev, ret, "problem registering spi host\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	dw_spi_enable_chip(dws, 0);
err_free_irq:
	free_irq(dws->irq, host);
err_free_host:
	spi_controller_put(host);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_add_host, SPI_DW_CORE);

void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	spi_unregister_controller(dws->host);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	dw_spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->host);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_remove_host, SPI_DW_CORE);

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->host);
	if (ret)
		return ret;

	dw_spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_suspend_host, SPI_DW_CORE);

int dw_spi_resume_host(struct dw_spi *dws)
{
	dw_spi_hw_init(&dws->host->dev, dws);
	return spi_controller_resume(dws->host);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_resume_host, SPI_DW_CORE);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");