// SPDX-License-Identifier: GPL-2.0-only
/*
 * DesignWare SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#include <linux/of.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_device related */
struct dw_spi_chip_data {
	u32 cr0;
	u32 rx_sample_dly;	/* RX sample delay */
};

#ifdef CONFIG_DEBUG_FS

#define DW_SPI_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off,			\
}

static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
	DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
	DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
	DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
	DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
	DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
	DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
	DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
	DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
	DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
	DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
	DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
	DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
	DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
	DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
	DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
	DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, sizeof(name), "dw_spi%d", dws->master->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	dws->regset.regs = dw_spi_dbgfs_regs;
	dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
	dws->regset.base = dws->regs;
	debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);

	return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	bool cs_high = !!(spi->mode & SPI_CS_HIGH);

	/*
	 * DW SPI controller demands any native CS being set in order to
	 * proceed with data transfer. So in order to activate the SPI
	 * communications we must set a corresponding bit in the Slave
	 * Enable register no matter whether the SPI core is configured to
	 * support active-high or active-low CS level.
	 */
	if (cs_high == enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
	else
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_set_cs, SPI_DW_CORE);

/* Return the max entries we can fill into tx fifo */
static inline u32 dw_spi_tx_max(struct dw_spi *dws)
{
	u32 tx_room, rxtx_gap;

	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the Tx/Rx mismatch: we thought of using
	 * (dws->fifo_len - rxflr - txflr) as the maximum value for Tx, but
	 * it doesn't cover the data which is out of the Tx/Rx FIFOs and
	 * inside the shift registers. So this software-side limit is
	 * applied instead.
	 */
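	/*
	 * E.g. with fifo_len = 16, rx_len = 28 and tx_len = 20 there are
	 * 8 words in flight (Tx FIFO, shift register, Rx FIFO), so at most
	 * 16 - 8 = 8 more words may be queued without risking an Rx FIFO
	 * overflow.
	 */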
	rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);

	return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}

/* Return the max entries we should read out of rx fifo */
static inline u32 dw_spi_rx_max(struct dw_spi *dws)
{
	return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}

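/*
 * Push up to dw_spi_tx_max() words into the Tx FIFO. For Rx-only transfers
 * (dws->tx == NULL) zero words are written out so the clock keeps running
 * while the incoming data is latched.
 */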
static void dw_writer(struct dw_spi *dws)
{
	u32 max = dw_spi_tx_max(dws);
	u32 txw = 0;

	while (max--) {
		if (dws->tx) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else if (dws->n_bytes == 2)
				txw = *(u16 *)(dws->tx);
			else
				txw = *(u32 *)(dws->tx);

			dws->tx += dws->n_bytes;
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		--dws->tx_len;
	}
}

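/*
 * Drain up to dw_spi_rx_max() words from the Rx FIFO. For Tx-only transfers
 * (dws->rx == NULL) the fetched words are simply discarded.
 */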
static void dw_reader(struct dw_spi *dws)
{
	u32 max = dw_spi_rx_max(dws);
	u32 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		if (dws->rx) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else if (dws->n_bytes == 2)
				*(u16 *)(dws->rx) = rxw;
			else
				*(u32 *)(dws->rx) = rxw;

			dws->rx += dws->n_bytes;
		}
		--dws->rx_len;
	}
}

int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
	u32 irq_status;
	int ret = 0;

	if (raw)
		irq_status = dw_readl(dws, DW_SPI_RISR);
	else
		irq_status = dw_readl(dws, DW_SPI_ISR);

	if (irq_status & DW_SPI_INT_RXOI) {
		dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
		ret = -EIO;
	}

	if (irq_status & DW_SPI_INT_RXUI) {
		dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
		ret = -EIO;
	}

	if (irq_status & DW_SPI_INT_TXOI) {
		dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
		ret = -EIO;
	}

	/* Generically handle the erroneous situation */
	if (ret) {
		dw_spi_reset_chip(dws);
		if (dws->master->cur_msg)
			dws->master->cur_msg->status = ret;
	}

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_check_status, SPI_DW_CORE);

static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (dw_spi_check_status(dws, false)) {
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}

	/*
	 * Read data from the Rx FIFO each time this handler gets a chance
	 * to run. If there is nothing left to receive, terminate the
	 * procedure. Otherwise adjust the Rx FIFO Threshold level if it's
	 * the final stage of the transfer. By doing so we'll get the next
	 * IRQ right when the leftover incoming data is received.
	 */
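	/*
	 * E.g. with only 5 words left to receive and RXFTLR still at 7 the
	 * RXF interrupt wouldn't fire until 8 words arrived, so drop the
	 * threshold to 4 to get an IRQ as soon as all 5 leftover words have
	 * landed in the Rx FIFO.
	 */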
	dw_reader(dws);
	if (!dws->rx_len) {
		dw_spi_mask_intr(dws, 0xff);
		spi_finalize_current_transfer(dws->master);
	} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
		dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
	}

	/*
	 * Send data out if the Tx FIFO Empty IRQ is received. The IRQ will
	 * be disabled after the data transmission is finished so as not to
	 * have the TXE IRQ flood at the final stage of the transfer.
	 */
	if (irq_status & DW_SPI_INT_TXEI) {
		dw_writer(dws);
		if (!dws->tx_len)
			dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *master = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & DW_SPI_INT_MASK;

	if (!irq_status)
		return IRQ_NONE;

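	/*
	 * An IRQ arriving without a message in flight is spurious (e.g.
	 * raised right after the previous transfer got finalized): mask
	 * everything and bail out.
	 */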
	if (!master->cur_msg) {
		dw_spi_mask_intr(dws, 0xff);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

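/*
 * Compose the static part of CTRLR0 for a given device: frame format,
 * SCPOL/SCPH (SPI mode) and loopback. The bit layout differs between the
 * DW APB SSI (PSSI) and DWC SSI (HSSI) IP cores, hence the two branches.
 */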
static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
	u32 cr0 = 0;

	if (dw_spi_ip_is(dws, PSSI)) {
		/* CTRLR0[ 5: 4] Frame Format */
		cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 6] Serial Clock Phase
		 * CTRLR0[ 7] Serial Clock Polarity
		 */
		if (spi->mode & SPI_CPOL)
			cr0 |= DW_PSSI_CTRLR0_SCPOL;
		if (spi->mode & SPI_CPHA)
			cr0 |= DW_PSSI_CTRLR0_SCPHA;

		/* CTRLR0[11] Shift Register Loop */
		if (spi->mode & SPI_LOOP)
			cr0 |= DW_PSSI_CTRLR0_SRL;
	} else {
		/* CTRLR0[ 7: 6] Frame Format */
		cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 8] Serial Clock Phase
		 * CTRLR0[ 9] Serial Clock Polarity
		 */
		if (spi->mode & SPI_CPOL)
			cr0 |= DW_HSSI_CTRLR0_SCPOL;
		if (spi->mode & SPI_CPHA)
			cr0 |= DW_HSSI_CTRLR0_SCPHA;

		/* CTRLR0[13] Shift Register Loop */
		if (spi->mode & SPI_LOOP)
			cr0 |= DW_HSSI_CTRLR0_SRL;

		/* CTRLR0[31] MST */
		if (dw_spi_ver_is_ge(dws, HSSI, 102A))
			cr0 |= DW_HSSI_CTRLR0_MST;
	}

	return cr0;
}

void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
			  struct dw_spi_cfg *cfg)
{
	struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
	u32 cr0 = chip->cr0;
	u32 speed_hz;
	u16 clk_div;

	/* CTRLR0[ 4/3: 0] or CTRLR0[ 20: 16] Data Frame Size */
	cr0 |= (cfg->dfs - 1) << dws->dfs_offset;

	if (dw_spi_ip_is(dws, PSSI))
		/* CTRLR0[ 9:8] Transfer Mode */
		cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_TMOD_MASK, cfg->tmode);
	else
		/* CTRLR0[11:10] Transfer Mode */
		cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_TMOD_MASK, cfg->tmode);

	dw_writel(dws, DW_SPI_CTRLR0, cr0);

	if (cfg->tmode == DW_SPI_CTRLR0_TMOD_EPROMREAD ||
	    cfg->tmode == DW_SPI_CTRLR0_TMOD_RO)
		dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);

	/* Note DW APB SSI clock divider doesn't support odd numbers */
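	/*
	 * E.g. with max_freq = 100 MHz and cfg->freq = 12 MHz:
	 * DIV_ROUND_UP(100M, 12M) + 1 = 10, which is already even, so
	 * clk_div = 10 and the effective SCLK rate is 10 MHz.
	 */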
	clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
	speed_hz = dws->max_freq / clk_div;

	if (dws->current_freq != speed_hz) {
		dw_spi_set_clk(dws, clk_div);
		dws->current_freq = speed_hz;
	}

	/* Update RX sample delay if required */
	if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
		dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
		dws->cur_rx_sample_dly = chip->rx_sample_dly;
	}
}
EXPORT_SYMBOL_NS_GPL(dw_spi_update_config, SPI_DW_CORE);

static void dw_spi_irq_setup(struct dw_spi *dws)
{
	u16 level;
	u8 imask;

	/*
	 * Originally the Tx and Rx data lengths match. The Rx FIFO Threshold
	 * level will be adjusted at the final stage of the IRQ-based SPI
	 * transfer execution so as not to lose the leftover of the incoming
	 * data.
	 */
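	/*
	 * E.g. with a 16-word FIFO and a long transfer: TXFTLR = 8 raises
	 * the TXE interrupt once the Tx FIFO drains down to 8 entries,
	 * while RXFTLR = 7 raises the RXF interrupt once 8 entries have
	 * been received.
	 */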
	level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
	dw_writel(dws, DW_SPI_TXFTLR, level);
	dw_writel(dws, DW_SPI_RXFTLR, level - 1);

	dws->transfer_handler = dw_spi_transfer_handler;

	imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI |
		DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
	dw_spi_umask_intr(dws, imask);
}

/*
 * The iterative procedure of the poll-based transfer is simple: write as much
 * as possible to the Tx FIFO, wait until the pending data is ready to be
 * read, read it from the Rx FIFO and check whether the performed procedure
 * has been successful.
 *
 * Note that this method, just like the IRQ-based transfer, won't work well
 * for SPI devices connected to the controller with native CS due to the
 * automatic CS assertion/de-assertion.
 */
static int dw_spi_poll_transfer(struct dw_spi *dws,
				struct spi_transfer *transfer)
{
	struct spi_delay delay;
	u16 nbits;
	int ret;

	delay.unit = SPI_DELAY_UNIT_SCK;
	nbits = dws->n_bytes * BITS_PER_BYTE;

	do {
		dw_writer(dws);

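		/*
		 * rx_len - tx_len words are currently in flight, so wait
		 * for that many word periods (measured in SCLK cycles)
		 * before draining the Rx FIFO.
		 */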
		delay.value = nbits * (dws->rx_len - dws->tx_len);
		spi_delay_exec(&delay, transfer);

		dw_reader(dws);

		ret = dw_spi_check_status(dws, true);
		if (ret)
			return ret;
	} while (dws->rx_len);

	return 0;
}

static int dw_spi_transfer_one(struct spi_controller *master,
			       struct spi_device *spi,
			       struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct dw_spi_cfg cfg = {
		.tmode = DW_SPI_CTRLR0_TMOD_TR,
		.dfs = transfer->bits_per_word,
		.freq = transfer->speed_hz,
	};
	int ret;

	dws->dma_mapped = 0;
	dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_len = transfer->len / dws->n_bytes;
	dws->rx = transfer->rx_buf;
	dws->rx_len = dws->tx_len;

	/* Ensure the data above is visible for all CPUs */
	smp_mb();

	dw_spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, spi, &cfg);

	transfer->effective_speed_hz = dws->current_freq;

	/* Check if current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	dw_spi_mask_intr(dws, 0xff);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret)
			return ret;
	}

	dw_spi_enable_chip(dws, 1);

	if (dws->dma_mapped)
		return dws->dma_ops->dma_transfer(dws, transfer);
	else if (dws->irq == IRQ_NOTCONNECTED)
		return dw_spi_poll_transfer(dws, transfer);

	dw_spi_irq_setup(dws);

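	/*
	 * A positive return tells the SPI core the transfer is still in
	 * progress; it completes asynchronously via
	 * spi_finalize_current_transfer() called from the IRQ handler.
	 */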
	return 1;
}

static void dw_spi_handle_err(struct spi_controller *master,
			      struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	dw_spi_reset_chip(dws);
}

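/*
 * CTRLR1.NDF is a 16-bit frame counter, so a single EEPROM-read operation
 * is capped at DW_SPI_NDF_MASK + 1 frames; callers going through
 * spi_mem_adjust_op_size() are expected to split larger reads accordingly.
 */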
static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_IN)
		op->data.nbytes = clamp_val(op->data.nbytes, 0, DW_SPI_NDF_MASK + 1);

	return 0;
}

static bool dw_spi_supports_mem_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
	    op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
		return false;

	return spi_mem_default_supports_op(mem, op);
}

static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
{
	unsigned int i, j, len;
	u8 *out;

	/*
	 * Calculate the total length of the EEPROM command transfer and
	 * either use the pre-allocated buffer or create a temporary one.
	 */
	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		len += op->data.nbytes;

	if (len <= DW_SPI_BUF_SIZE) {
		out = dws->buf;
	} else {
		out = kzalloc(len, GFP_KERNEL);
		if (!out)
			return -ENOMEM;
	}

	/*
	 * Collect the operation code, address and dummy bytes into a single
	 * buffer. If there is data to be sent, copy it into the same buffer
	 * as well in order to speed the data transmission up.
	 */
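	/*
	 * E.g. a fast-read with a 1-byte opcode, 3-byte address and one
	 * dummy byte is laid out as: out[0] = opcode, out[1..3] = address
	 * (MSB first), out[4] = 0x00.
	 */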
	for (i = 0; i < op->cmd.nbytes; ++i)
		out[i] = DW_SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
	for (j = 0; j < op->addr.nbytes; ++i, ++j)
		out[i] = DW_SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
	for (j = 0; j < op->dummy.nbytes; ++i, ++j)
		out[i] = 0x0;

	if (op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(&out[i], op->data.buf.out, op->data.nbytes);

	dws->n_bytes = 1;
	dws->tx = out;
	dws->tx_len = len;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dws->rx = op->data.buf.in;
		dws->rx_len = op->data.nbytes;
	} else {
		dws->rx = NULL;
		dws->rx_len = 0;
	}

	return 0;
}

static void dw_spi_free_mem_buf(struct dw_spi *dws)
{
	if (dws->tx != dws->buf)
		kfree(dws->tx);
}

static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
{
	u32 room, entries, sts;
	unsigned int len;
	u8 *buf;

	/*
	 * At the initial stage we just pre-fill the Tx FIFO with no rush,
	 * since the native CS hasn't been enabled yet and the automatic data
	 * transmission won't start till we do that.
	 */
	len = min(dws->fifo_len, dws->tx_len);
	buf = dws->tx;
	while (len--)
		dw_write_io_reg(dws, DW_SPI_DR, *buf++);

	/*
	 * After setting any bit in the SER register the transmission will
	 * start automatically. We have to keep up with that procedure,
	 * otherwise the CS de-assertion will happen whereupon the memory
	 * operation will be pre-terminated.
	 */
	len = dws->tx_len - ((void *)buf - dws->tx);
	dw_spi_set_cs(spi, false);
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
		if (!entries) {
			dev_err(&dws->master->dev, "CS de-assertion on Tx\n");
			return -EIO;
		}
		room = min(dws->fifo_len - entries, len);
		for (; room; --room, --len)
			dw_write_io_reg(dws, DW_SPI_DR, *buf++);
	}

	/*
	 * Data fetching will start automatically if the EEPROM-read mode is
	 * activated. We have to keep up with the incoming data pace to
	 * prevent an Rx FIFO overflow causing the inbound data loss.
	 */
	len = dws->rx_len;
	buf = dws->rx;
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
		if (!entries) {
			sts = readl_relaxed(dws->regs + DW_SPI_RISR);
			if (sts & DW_SPI_INT_RXOI) {
				dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
				return -EIO;
			}
			continue;
		}
		entries = min(entries, len);
		for (; entries; --entries, --len)
			*buf++ = dw_read_io_reg(dws, DW_SPI_DR);
	}

	return 0;
}

static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
	return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
}

static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	ns = NSEC_PER_SEC / dws->current_freq * nents;
	ns *= dws->n_bytes * BITS_PER_BYTE;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

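	/*
	 * E.g. 4 words pending at a 1 MHz SCLK with 8-bit frames gives a
	 * 32 us poll period; the wait gives up after DW_SPI_WAIT_RETRIES
	 * unsuccessful polls.
	 */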
	while (dw_spi_ctlr_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Mem op hung up\n");
		return -EIO;
	}

	return 0;
}

static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
	dw_spi_enable_chip(dws, 0);
	dw_spi_set_cs(spi, true);
	dw_spi_enable_chip(dws, 1);
}

/*
 * The SPI memory operation implementation below is the best choice for the
 * devices, which are selected by the native chip-select lane. It's
 * specifically developed to work around the problem with the automatic
 * chip-select lane toggle when there is no data in the Tx FIFO buffer.
 * Luckily the current SPI-mem core calls the exec_op() callback only if the
 * GPIO-based CS is unavailable.
 */
static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
	struct dw_spi_cfg cfg;
	unsigned long flags;
	int ret;

	/*
	 * Collect the outbound data into a single buffer to speed the
	 * transmission up at least on the initial stage.
	 */
	ret = dw_spi_init_mem_buf(dws, op);
	if (ret)
		return ret;

	/*
	 * The DW SPI EEPROM-read mode is required only for the SPI memory
	 * Data-IN operation. Transmit-only mode is suitable for the rest of
	 * them.
	 */
	cfg.dfs = 8;
	cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
	if (op->data.dir == SPI_MEM_DATA_IN) {
		cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
		cfg.ndf = op->data.nbytes;
	} else {
		cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
	}

	dw_spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, mem->spi, &cfg);

	dw_spi_mask_intr(dws, 0xff);

	dw_spi_enable_chip(dws, 1);

	/*
	 * The DW APB SSI controller has very nasty peculiarities. First,
	 * originally (without any vendor-specific modifications) it doesn't
	 * provide a direct way to set and clear the native chip-select
	 * signal. Instead the controller asserts the CS lane if the Tx FIFO
	 * isn't empty and a transmission is going on, and automatically
	 * de-asserts it back to the high level if the Tx FIFO doesn't have
	 * anything to be pushed out. Due to that, multi-tasking or heavy IRQ
	 * activity might be fatal, since the transfer procedure preemption
	 * may cause the Tx FIFO to get empty and a sudden CS de-assertion,
	 * which in the middle of the transfer will most likely cause data
	 * loss. Secondly, the EEPROM-read and Read-only DW SPI transfer
	 * modes imply the incoming data being automatically pulled into the
	 * Rx FIFO. So if the driver software is late in fetching the data
	 * from the FIFO before it's overflown, new incoming data will be
	 * lost. In order to make sure the executed memory operations are
	 * CS-atomic and to prevent the Rx FIFO overflow we have to disable
	 * the local interrupts so as to block any preemption during the
	 * subsequent IO operations.
	 *
	 * Note. Under some circumstances disabling IRQs may not help to
	 * prevent the problems described above. The CS de-assertion and Rx
	 * FIFO overflow may still happen due to a relatively slow system bus
	 * or a CPU not working fast enough, so the write-then-read algo
	 * implemented here just won't keep up with the SPI bus data
	 * transfer. Such a situation is highly platform specific and is
	 * supposed to be fixed by manually restricting the SPI bus frequency
	 * using the dws->max_mem_freq parameter.
	 */
	local_irq_save(flags);
	preempt_disable();

	ret = dw_spi_write_then_read(dws, mem->spi);

	local_irq_restore(flags);
	preempt_enable();

	/*
	 * Wait for the operation to finish and check the controller status
	 * only if there hasn't been any run-time error detected. In the
	 * former case it's just pointless. In the latter case it prevents an
	 * additional error message, since any HW error flag would have been
	 * set due to the error already detected during the data transfer.
	 */
	if (!ret) {
		ret = dw_spi_wait_mem_op_done(dws);
		if (!ret)
			ret = dw_spi_check_status(dws, true);
	}

	dw_spi_stop_mem_op(dws, mem->spi);

	dw_spi_free_mem_buf(dws);

	return ret;
}

/*
 * Initialize the default memory operations if a glue layer hasn't specified
 * custom ones. Direct mapping operations will be preserved anyway since the
 * DW SPI controller doesn't have an embedded dirmap interface. Note the
 * memory operations implemented in this driver are the best choice only for
 * the DW APB SSI controller with standard native CS functionality. If a
 * hardware vendor has fixed the automatic CS assertion/de-assertion
 * peculiarity, then it will be safer to use the normal SPI-messages-based
 * transfers implementation.
 */
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
	if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
	    !dws->set_cs) {
		dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
		dws->mem_ops.supports_op = dw_spi_supports_mem_op;
		dws->mem_ops.exec_op = dw_spi_exec_mem_op;
		if (!dws->max_mem_freq)
			dws->max_mem_freq = dws->max_freq;
	}
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct dw_spi_chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		u32 rx_sample_dly_ns;

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
		/* Get specific / default rx-sample-delay */
		if (device_property_read_u32(&spi->dev,
					     "rx-sample-delay-ns",
					     &rx_sample_dly_ns) != 0)
			/* Use default controller value */
			rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
		chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
							NSEC_PER_SEC /
							dws->max_freq);
	}

	/*
	 * Update CR0 data each time the setup callback is invoked since
	 * the device parameters could have been changed, for instance, by
	 * the MMC SPI driver or something else.
	 */
	chip->cr0 = dw_spi_prepare_cr0(dws, spi);

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct dw_spi_chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	dw_spi_reset_chip(dws);

	/*
	 * Retrieve the Synopsys component version if it hasn't been
	 * specified by the platform. The CoreKit version ID is encoded as a
	 * 3-char ASCII code enclosed with '*' (typical for most Synopsys
	 * IP cores).
	 */
	if (!dws->ver) {
		dws->ver = dw_readl(dws, DW_SPI_VERSION);

		dev_dbg(dev, "Synopsys DWC%sSSI v%c.%c%c\n",
			dw_spi_ip_is(dws, PSSI) ? " APB " : " ",
			DW_SPI_GET_BYTE(dws->ver, 3), DW_SPI_GET_BYTE(dws->ver, 2),
			DW_SPI_GET_BYTE(dws->ver, 1));
	}

	/*
	 * Try to detect the FIFO depth if not set by the interface driver;
	 * per the HW spec the depth can be from 2 to 256.
	 */
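	/*
	 * TXFTLR only latches values below the FIFO depth, so writing
	 * increasing thresholds until the read-back mismatches reveals the
	 * depth: e.g. with a 16-entry FIFO the first rejected value, and
	 * thus the detected length, is 16.
	 */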
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFTLR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFTLR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/*
	 * Detect the CTRLR0.DFS field size and offset by testing the lowest
	 * bits' writability. Note the DWC SSI controller also has the
	 * extended DFS, but with a zero offset.
	 */
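	/*
	 * Writing all-ones to CTRLR0 and reading it back shows whether the
	 * legacy 4-bit DFS field at [3:0] is present; if those bits don't
	 * stick, the core implements the extended frame size field at
	 * [20:16] instead.
	 */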
	if (dw_spi_ip_is(dws, PSSI)) {
		u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);

		dw_spi_enable_chip(dws, 0);
		dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff);
		cr0 = dw_readl(dws, DW_SPI_CTRLR0);
		dw_writel(dws, DW_SPI_CTRLR0, tmp);
		dw_spi_enable_chip(dws, 1);

		if (!(cr0 & DW_PSSI_CTRLR0_DFS_MASK)) {
			dws->caps |= DW_SPI_CAP_DFS32;
			dws->dfs_offset = __bf_shf(DW_PSSI_CTRLR0_DFS32_MASK);
			dev_dbg(dev, "Detected 32-bits max data frame size\n");
		}
	} else {
		dws->caps |= DW_SPI_CAP_DFS32;
	}

	/* enable HW fixup for explicit CS deselect for Amazon's alpine chip */
	if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}

int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *master;
	int ret;

	if (!dws)
		return -EINVAL;

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	device_set_node(&master->dev, dev_fwnode(dev));

	dws->master = master;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	spi_controller_set_devdata(master, dws);

	/* Basic HW init */
	dw_spi_hw_init(dev, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  master);
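	/*
	 * -ENOTCONN is tolerated: with dws->irq == IRQ_NOTCONNECTED the
	 * driver falls back to the poll-based transfer implementation.
	 */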
	if (ret < 0 && ret != -ENOTCONN) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	dw_spi_init_mem_ops(dws);

	master->use_gpio_descriptors = true;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	if (dws->caps & DW_SPI_CAP_DFS32)
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	if (dws->set_cs)
		master->set_cs = dws->set_cs;
	else
		master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	if (dws->mem_ops.exec_op)
		master->mem_ops = &dws->mem_ops;
	master->max_speed_hz = dws->max_freq;
	master->flags = SPI_MASTER_GPIO_SS;
	master->auto_runtime_pm = true;

	/* Get default rx sample delay */
	device_property_read_u32(dev, "rx-sample-delay-ns",
				 &dws->def_rx_sample_dly_ns);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dev, dws);
		if (ret == -EPROBE_DEFER) {
			goto err_free_irq;
		} else if (ret) {
			dev_warn(dev, "DMA init failed\n");
		} else {
			master->can_dma = dws->dma_ops->can_dma;
			master->flags |= SPI_CONTROLLER_MUST_TX;
		}
	}

	ret = spi_register_controller(master);
	if (ret) {
		dev_err_probe(dev, ret, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	dw_spi_enable_chip(dws, 0);
err_free_irq:
	free_irq(dws->irq, master);
err_free_master:
	spi_controller_put(master);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_add_host, SPI_DW_CORE);

void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	spi_unregister_controller(dws->master);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	dw_spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_remove_host, SPI_DW_CORE);

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->master);
	if (ret)
		return ret;

	dw_spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_suspend_host, SPI_DW_CORE);

int dw_spi_resume_host(struct dw_spi *dws)
{
	dw_spi_hw_init(&dws->master->dev, dws);
	return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_resume_host, SPI_DW_CORE);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");