Linux Audio

Check our new training course

Buildroot integration, development and maintenance

Need a Buildroot system for your embedded project?
Loading...
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
   4 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/of_device.h>
 
  10#include <linux/clk.h>
  11#include <linux/io.h>
  12#include <linux/delay.h>
  13#include <linux/interrupt.h>
  14#include <linux/sched.h>
  15#include <linux/completion.h>
  16#include <linux/spinlock.h>
  17#include <linux/err.h>
  18#include <linux/pm_runtime.h>
  19#include <linux/spi/spi.h>
  20
  21#ifdef CONFIG_LANTIQ
  22#include <lantiq_soc.h>
  23#endif
  24
  25#define LTQ_SPI_RX_IRQ_NAME	"spi_rx"
  26#define LTQ_SPI_TX_IRQ_NAME	"spi_tx"
  27#define LTQ_SPI_ERR_IRQ_NAME	"spi_err"
  28#define LTQ_SPI_FRM_IRQ_NAME	"spi_frm"
  29
  30#define LTQ_SPI_CLC		0x00
  31#define LTQ_SPI_PISEL		0x04
  32#define LTQ_SPI_ID		0x08
  33#define LTQ_SPI_CON		0x10
  34#define LTQ_SPI_STAT		0x14
  35#define LTQ_SPI_WHBSTATE	0x18
  36#define LTQ_SPI_TB		0x20
  37#define LTQ_SPI_RB		0x24
  38#define LTQ_SPI_RXFCON		0x30
  39#define LTQ_SPI_TXFCON		0x34
  40#define LTQ_SPI_FSTAT		0x38
  41#define LTQ_SPI_BRT		0x40
  42#define LTQ_SPI_BRSTAT		0x44
  43#define LTQ_SPI_SFCON		0x60
  44#define LTQ_SPI_SFSTAT		0x64
  45#define LTQ_SPI_GPOCON		0x70
  46#define LTQ_SPI_GPOSTAT		0x74
  47#define LTQ_SPI_FPGO		0x78
  48#define LTQ_SPI_RXREQ		0x80
  49#define LTQ_SPI_RXCNT		0x84
  50#define LTQ_SPI_DMACON		0xec
  51#define LTQ_SPI_IRNEN		0xf4
  52
  53#define LTQ_SPI_CLC_SMC_S	16	/* Clock divider for sleep mode */
  54#define LTQ_SPI_CLC_SMC_M	(0xFF << LTQ_SPI_CLC_SMC_S)
  55#define LTQ_SPI_CLC_RMC_S	8	/* Clock divider for normal run mode */
  56#define LTQ_SPI_CLC_RMC_M	(0xFF << LTQ_SPI_CLC_RMC_S)
  57#define LTQ_SPI_CLC_DISS	BIT(1)	/* Disable status bit */
  58#define LTQ_SPI_CLC_DISR	BIT(0)	/* Disable request bit */
  59
  60#define LTQ_SPI_ID_TXFS_S	24	/* Implemented TX FIFO size */
  61#define LTQ_SPI_ID_RXFS_S	16	/* Implemented RX FIFO size */
  62#define LTQ_SPI_ID_MOD_S	8	/* Module ID */
  63#define LTQ_SPI_ID_MOD_M	(0xff << LTQ_SPI_ID_MOD_S)
  64#define LTQ_SPI_ID_CFG_S	5	/* DMA interface support */
  65#define LTQ_SPI_ID_CFG_M	(1 << LTQ_SPI_ID_CFG_S)
  66#define LTQ_SPI_ID_REV_M	0x1F	/* Hardware revision number */
  67
  68#define LTQ_SPI_CON_BM_S	16	/* Data width selection */
  69#define LTQ_SPI_CON_BM_M	(0x1F << LTQ_SPI_CON_BM_S)
  70#define LTQ_SPI_CON_EM		BIT(24)	/* Echo mode */
  71#define LTQ_SPI_CON_IDLE	BIT(23)	/* Idle bit value */
  72#define LTQ_SPI_CON_ENBV	BIT(22)	/* Enable byte valid control */
  73#define LTQ_SPI_CON_RUEN	BIT(12)	/* Receive underflow error enable */
  74#define LTQ_SPI_CON_TUEN	BIT(11)	/* Transmit underflow error enable */
  75#define LTQ_SPI_CON_AEN		BIT(10)	/* Abort error enable */
  76#define LTQ_SPI_CON_REN		BIT(9)	/* Receive overflow error enable */
  77#define LTQ_SPI_CON_TEN		BIT(8)	/* Transmit overflow error enable */
  78#define LTQ_SPI_CON_LB		BIT(7)	/* Loopback control */
  79#define LTQ_SPI_CON_PO		BIT(6)	/* Clock polarity control */
  80#define LTQ_SPI_CON_PH		BIT(5)	/* Clock phase control */
  81#define LTQ_SPI_CON_HB		BIT(4)	/* Heading control */
  82#define LTQ_SPI_CON_RXOFF	BIT(1)	/* Switch receiver off */
  83#define LTQ_SPI_CON_TXOFF	BIT(0)	/* Switch transmitter off */
  84
  85#define LTQ_SPI_STAT_RXBV_S	28
  86#define LTQ_SPI_STAT_RXBV_M	(0x7 << LTQ_SPI_STAT_RXBV_S)
  87#define LTQ_SPI_STAT_BSY	BIT(13)	/* Busy flag */
  88#define LTQ_SPI_STAT_RUE	BIT(12)	/* Receive underflow error flag */
  89#define LTQ_SPI_STAT_TUE	BIT(11)	/* Transmit underflow error flag */
  90#define LTQ_SPI_STAT_AE		BIT(10)	/* Abort error flag */
  91#define LTQ_SPI_STAT_RE		BIT(9)	/* Receive error flag */
  92#define LTQ_SPI_STAT_TE		BIT(8)	/* Transmit error flag */
  93#define LTQ_SPI_STAT_ME		BIT(7)	/* Mode error flag */
  94#define LTQ_SPI_STAT_MS		BIT(1)	/* Master/slave select bit */
  95#define LTQ_SPI_STAT_EN		BIT(0)	/* Enable bit */
  96#define LTQ_SPI_STAT_ERRORS	(LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
  97				 LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
  98				 LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)
  99
 100#define LTQ_SPI_WHBSTATE_SETTUE	BIT(15)	/* Set transmit underflow error flag */
 101#define LTQ_SPI_WHBSTATE_SETAE	BIT(14)	/* Set abort error flag */
 102#define LTQ_SPI_WHBSTATE_SETRE	BIT(13)	/* Set receive error flag */
 103#define LTQ_SPI_WHBSTATE_SETTE	BIT(12)	/* Set transmit error flag */
 104#define LTQ_SPI_WHBSTATE_CLRTUE	BIT(11)	/* Clear transmit underflow error flag */
 105#define LTQ_SPI_WHBSTATE_CLRAE	BIT(10)	/* Clear abort error flag */
 106#define LTQ_SPI_WHBSTATE_CLRRE	BIT(9)	/* Clear receive error flag */
 107#define LTQ_SPI_WHBSTATE_CLRTE	BIT(8)	/* Clear transmit error flag */
 108#define LTQ_SPI_WHBSTATE_SETME	BIT(7)	/* Set mode error flag */
 109#define LTQ_SPI_WHBSTATE_CLRME	BIT(6)	/* Clear mode error flag */
 110#define LTQ_SPI_WHBSTATE_SETRUE	BIT(5)	/* Set receive underflow error flag */
 111#define LTQ_SPI_WHBSTATE_CLRRUE	BIT(4)	/* Clear receive underflow error flag */
 112#define LTQ_SPI_WHBSTATE_SETMS	BIT(3)	/* Set master select bit */
 113#define LTQ_SPI_WHBSTATE_CLRMS	BIT(2)	/* Clear master select bit */
 114#define LTQ_SPI_WHBSTATE_SETEN	BIT(1)	/* Set enable bit (operational mode) */
#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
 116#define LTQ_SPI_WHBSTATE_CLR_ERRORS	(LTQ_SPI_WHBSTATE_CLRRUE | \
 117					 LTQ_SPI_WHBSTATE_CLRME | \
 118					 LTQ_SPI_WHBSTATE_CLRTE | \
 119					 LTQ_SPI_WHBSTATE_CLRRE | \
 120					 LTQ_SPI_WHBSTATE_CLRAE | \
 121					 LTQ_SPI_WHBSTATE_CLRTUE)
 122
 123#define LTQ_SPI_RXFCON_RXFITL_S	8	/* FIFO interrupt trigger level */
 124#define LTQ_SPI_RXFCON_RXFLU	BIT(1)	/* FIFO flush */
 125#define LTQ_SPI_RXFCON_RXFEN	BIT(0)	/* FIFO enable */
 126
 127#define LTQ_SPI_TXFCON_TXFITL_S	8	/* FIFO interrupt trigger level */
 128#define LTQ_SPI_TXFCON_TXFLU	BIT(1)	/* FIFO flush */
 129#define LTQ_SPI_TXFCON_TXFEN	BIT(0)	/* FIFO enable */
 130
 131#define LTQ_SPI_FSTAT_RXFFL_S	0
 132#define LTQ_SPI_FSTAT_TXFFL_S	8
 133
 134#define LTQ_SPI_GPOCON_ISCSBN_S	8
 135#define LTQ_SPI_GPOCON_INVOUTN_S	0
 136
 137#define LTQ_SPI_FGPO_SETOUTN_S	8
 138#define LTQ_SPI_FGPO_CLROUTN_S	0
 139
 140#define LTQ_SPI_RXREQ_RXCNT_M	0xFFFF	/* Receive count value */
#define LTQ_SPI_RXCNT_TODO_M	0xFFFF	/* Receive to-do value */
 142
 143#define LTQ_SPI_IRNEN_TFI	BIT(4)	/* TX finished interrupt */
 144#define LTQ_SPI_IRNEN_F		BIT(3)	/* Frame end interrupt request */
 145#define LTQ_SPI_IRNEN_E		BIT(2)	/* Error end interrupt request */
 146#define LTQ_SPI_IRNEN_T_XWAY	BIT(1)	/* Transmit end interrupt request */
 147#define LTQ_SPI_IRNEN_R_XWAY	BIT(0)	/* Receive end interrupt request */
#define LTQ_SPI_IRNEN_R_XRX	BIT(1)	/* Receive end interrupt request */
#define LTQ_SPI_IRNEN_T_XRX	BIT(0)	/* Transmit end interrupt request */
 150#define LTQ_SPI_IRNEN_ALL	0x1F
 151
 152struct lantiq_ssc_spi;
 153
 154struct lantiq_ssc_hwcfg {
 155	int (*cfg_irq)(struct platform_device *pdev, struct lantiq_ssc_spi *spi);
 156	unsigned int	irnen_r;
 157	unsigned int	irnen_t;
 158	unsigned int	irncr;
 159	unsigned int	irnicr;
 160	bool		irq_ack;
 161	u32		fifo_size_mask;
 162};
 163
 164struct lantiq_ssc_spi {
 165	struct spi_master		*master;
 166	struct device			*dev;
 167	void __iomem			*regbase;
 168	struct clk			*spi_clk;
 169	struct clk			*fpi_clk;
 170	const struct lantiq_ssc_hwcfg	*hwcfg;
 171
 172	spinlock_t			lock;
 173	struct workqueue_struct		*wq;
 174	struct work_struct		work;
 175
 176	const u8			*tx;
 177	u8				*rx;
 178	unsigned int			tx_todo;
 179	unsigned int			rx_todo;
 180	unsigned int			bits_per_word;
 181	unsigned int			speed_hz;
 182	unsigned int			tx_fifo_size;
 183	unsigned int			rx_fifo_size;
 184	unsigned int			base_cs;
 185	unsigned int			fdx_tx_level;
 186};
 187
 188static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
 189{
 190	return __raw_readl(spi->regbase + reg);
 191}
 192
 193static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
 194			      u32 reg)
 195{
 196	__raw_writel(val, spi->regbase + reg);
 197}
 198
 199static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
 200			     u32 set, u32 reg)
 201{
 202	u32 val = __raw_readl(spi->regbase + reg);
 203
 204	val &= ~clr;
 205	val |= set;
 206	__raw_writel(val, spi->regbase + reg);
 207}
 208
 209static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
 210{
 211	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 212	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
 213
 214	return (fstat >> LTQ_SPI_FSTAT_TXFFL_S) & hwcfg->fifo_size_mask;
 215}
 216
 217static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
 218{
 219	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 220	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
 221
 222	return (fstat >> LTQ_SPI_FSTAT_RXFFL_S) & hwcfg->fifo_size_mask;
 223}
 224
 225static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
 226{
 227	return spi->tx_fifo_size - tx_fifo_level(spi);
 228}
 229
/*
 * Enable and flush the RX FIFO. The interrupt trigger level (RXFITL)
 * is programmed to the full RX FIFO size.
 */
static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
{
	u32 val = spi->rx_fifo_size << LTQ_SPI_RXFCON_RXFITL_S;

	val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
	lantiq_ssc_writel(spi, val, LTQ_SPI_RXFCON);
}
 237
 238static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
 239{
 240	u32 val = 1 << LTQ_SPI_TXFCON_TXFITL_S;
 241
 242	val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
 243	lantiq_ssc_writel(spi, val, LTQ_SPI_TXFCON);
 244}
 245
 246static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
 247{
 248	lantiq_ssc_maskl(spi, 0, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
 249}
 250
 251static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
 252{
 253	lantiq_ssc_maskl(spi, 0, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
 254}
 255
 256static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
 257{
 258	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
 259}
 260
 261static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
 262{
 263	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
 264}
 265
 266static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
 267			      unsigned int max_speed_hz)
 268{
 269	u32 spi_clk, brt;
 270
 271	/*
 272	 * SPI module clock is derived from FPI bus clock dependent on
 273	 * divider value in CLC.RMS which is always set to 1.
 274	 *
 275	 *                 f_SPI
 276	 * baudrate = --------------
 277	 *             2 * (BR + 1)
 278	 */
 279	spi_clk = clk_get_rate(spi->fpi_clk) / 2;
 280
 281	if (max_speed_hz > spi_clk)
 282		brt = 0;
 283	else
 284		brt = spi_clk / max_speed_hz - 1;
 285
 286	if (brt > 0xFFFF)
 287		brt = 0xFFFF;
 288
 289	dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
 290		spi_clk, max_speed_hz, brt);
 291
 292	lantiq_ssc_writel(spi, brt, LTQ_SPI_BRT);
 293}
 294
 295static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
 296				   unsigned int bits_per_word)
 297{
 298	u32 bm;
 299
 300	/* CON.BM value = bits_per_word - 1 */
 301	bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_S;
 302
 303	lantiq_ssc_maskl(spi, LTQ_SPI_CON_BM_M, bm, LTQ_SPI_CON);
 304}
 305
 306static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
 307				unsigned int mode)
 308{
 309	u32 con_set = 0, con_clr = 0;
 310
 311	/*
 312	 * SPI mode mapping in CON register:
 313	 * Mode CPOL CPHA CON.PO CON.PH
 314	 *  0    0    0      0      1
 315	 *  1    0    1      0      0
 316	 *  2    1    0      1      1
 317	 *  3    1    1      1      0
 318	 */
 319	if (mode & SPI_CPHA)
 320		con_clr |= LTQ_SPI_CON_PH;
 321	else
 322		con_set |= LTQ_SPI_CON_PH;
 323
 324	if (mode & SPI_CPOL)
 325		con_set |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
 326	else
 327		con_clr |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
 328
 329	/* Set heading control */
 330	if (mode & SPI_LSB_FIRST)
 331		con_clr |= LTQ_SPI_CON_HB;
 332	else
 333		con_set |= LTQ_SPI_CON_HB;
 334
 335	/* Set loopback mode */
 336	if (mode & SPI_LOOP)
 337		con_set |= LTQ_SPI_CON_LB;
 338	else
 339		con_clr |= LTQ_SPI_CON_LB;
 340
 341	lantiq_ssc_maskl(spi, con_clr, con_set, LTQ_SPI_CON);
 342}
 343
/*
 * One-time controller initialization: program clocking, clear errors,
 * select master mode, reset both FIFOs and enable interrupts. The
 * controller must be re-enabled (active mode) by a later caller;
 * this function leaves TX/RX switched off.
 */
static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
{
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;

	/*
	 * Set clock divider for run mode to 1 to
	 * run at same frequency as FPI bus
	 */
	lantiq_ssc_writel(spi, 1 << LTQ_SPI_CLC_RMC_S, LTQ_SPI_CLC);

	/* Put controller into config mode */
	hw_enter_config_mode(spi);

	/* Clear error flags */
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);

	/* Enable error checking, disable TX/RX */
	lantiq_ssc_writel(spi, LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
		LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN | LTQ_SPI_CON_TXOFF |
		LTQ_SPI_CON_RXOFF, LTQ_SPI_CON);

	/* Setup default SPI mode (cached bits_per_word, mode 0) */
	hw_setup_bits_per_word(spi, spi->bits_per_word);
	hw_setup_clock_mode(spi, SPI_MODE_0);

	/* Enable master mode and clear error flags */
	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETMS |
			       LTQ_SPI_WHBSTATE_CLR_ERRORS,
			       LTQ_SPI_WHBSTATE);

	/* Reset GPIO/CS registers */
	lantiq_ssc_writel(spi, 0, LTQ_SPI_GPOCON);
	/* 0xFF00 writes the upper (SETOUTN) byte of FPGO for all CS lines */
	lantiq_ssc_writel(spi, 0xFF00, LTQ_SPI_FPGO);

	/* Enable and flush FIFOs */
	rx_fifo_reset(spi);
	tx_fifo_reset(spi);

	/* Enable TX, RX and error interrupts (bits are SoC-specific) */
	lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r |
			  LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
}
 386
 387static int lantiq_ssc_setup(struct spi_device *spidev)
 388{
 389	struct spi_master *master = spidev->master;
 390	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
 391	unsigned int cs = spidev->chip_select;
 392	u32 gpocon;
 393
 394	/* GPIOs are used for CS */
 395	if (spidev->cs_gpiod)
 396		return 0;
 397
 398	dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
 399
 400	if (cs < spi->base_cs) {
 401		dev_err(spi->dev,
 402			"chipselect %i too small (min %i)\n", cs, spi->base_cs);
 403		return -EINVAL;
 404	}
 405
 406	/* set GPO pin to CS mode */
 407	gpocon = 1 << ((cs - spi->base_cs) + LTQ_SPI_GPOCON_ISCSBN_S);
 408
 409	/* invert GPO pin */
 410	if (spidev->mode & SPI_CS_HIGH)
 411		gpocon |= 1 << (cs - spi->base_cs);
 412
 413	lantiq_ssc_maskl(spi, 0, gpocon, LTQ_SPI_GPOCON);
 414
 415	return 0;
 416}
 417
 418static int lantiq_ssc_prepare_message(struct spi_master *master,
 419				      struct spi_message *message)
 420{
 421	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
 422
 423	hw_enter_config_mode(spi);
 424	hw_setup_clock_mode(spi, message->spi->mode);
 425	hw_enter_active_mode(spi);
 426
 427	return 0;
 428}
 429
/*
 * Apply per-transfer settings. Speed and word size are reprogrammed
 * only when they differ from the cached values (a config-mode round
 * trip is required for that); TX/RX paths are switched on or off
 * depending on which buffers the transfer provides.
 */
static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
			      struct spi_device *spidev, struct spi_transfer *t)
{
	unsigned int speed_hz = t->speed_hz;
	unsigned int bits_per_word = t->bits_per_word;
	u32 con;

	if (bits_per_word != spi->bits_per_word ||
		speed_hz != spi->speed_hz) {
		/* These settings may only change while in config mode */
		hw_enter_config_mode(spi);
		hw_setup_speed_hz(spi, speed_hz);
		hw_setup_bits_per_word(spi, bits_per_word);
		hw_enter_active_mode(spi);

		/* Cache so the next identical transfer skips the round trip */
		spi->speed_hz = speed_hz;
		spi->bits_per_word = bits_per_word;
	}

	/* Configure transmitter and receiver */
	con = lantiq_ssc_readl(spi, LTQ_SPI_CON);
	if (t->tx_buf)
		con &= ~LTQ_SPI_CON_TXOFF;
	else
		con |= LTQ_SPI_CON_TXOFF;

	if (t->rx_buf)
		con &= ~LTQ_SPI_CON_RXOFF;
	else
		con |= LTQ_SPI_CON_RXOFF;

	lantiq_ssc_writel(spi, con, LTQ_SPI_CON);
}
 462
 463static int lantiq_ssc_unprepare_message(struct spi_master *master,
 464					struct spi_message *message)
 465{
 466	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
 467
 468	flush_workqueue(spi->wq);
 469
 470	/* Disable transmitter and receiver while idle */
 471	lantiq_ssc_maskl(spi, 0, LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF,
 472			 LTQ_SPI_CON);
 473
 474	return 0;
 475}
 476
/*
 * Push as much pending TX data into the TX FIFO as fits. One FIFO
 * entry carries one word of the current bits_per_word size (1, 2 or 4
 * bytes). fdx_tx_level counts the words written in this call so the
 * full-duplex RX path knows how many words to expect back.
 *
 * NOTE(review): the 16/32-bit cases cast spi->tx to u16*/u32*; this
 * presumably relies on callers supplying suitably aligned buffers —
 * confirm against the SPI core's buffer alignment guarantees.
 */
static void tx_fifo_write(struct lantiq_ssc_spi *spi)
{
	const u8 *tx8;
	const u16 *tx16;
	const u32 *tx32;
	u32 data;
	unsigned int tx_free = tx_fifo_free(spi);

	spi->fdx_tx_level = 0;
	while (spi->tx_todo && tx_free) {
		switch (spi->bits_per_word) {
		case 2 ... 8:
			tx8 = spi->tx;
			data = *tx8;
			spi->tx_todo--;
			spi->tx++;
			break;
		case 16:
			tx16 = (u16 *) spi->tx;
			data = *tx16;
			spi->tx_todo -= 2;
			spi->tx += 2;
			break;
		case 32:
			tx32 = (u32 *) spi->tx;
			data = *tx32;
			spi->tx_todo -= 4;
			spi->tx += 4;
			break;
		default:
			/* Unsupported word size: write a zero word */
			WARN_ON(1);
			data = 0;
			break;
		}

		lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
		tx_free--;
		spi->fdx_tx_level++;
	}
}
 517
/*
 * Drain the RX FIFO during a full-duplex transfer. Busy-waits until
 * the RX fill level matches fdx_tx_level (the number of words pushed
 * by the last tx_fifo_write()) so all expected data has been shifted
 * in before reading; note this polling loop has no timeout.
 */
static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
{
	u8 *rx8;
	u16 *rx16;
	u32 *rx32;
	u32 data;
	unsigned int rx_fill = rx_fifo_level(spi);

	/*
	 * Wait until all expected data to be shifted in.
	 * Otherwise, rx overrun may occur.
	 */
	while (rx_fill != spi->fdx_tx_level)
		rx_fill = rx_fifo_level(spi);

	while (rx_fill) {
		data = lantiq_ssc_readl(spi, LTQ_SPI_RB);

		/* Store one word per FIFO entry, sized by bits_per_word */
		switch (spi->bits_per_word) {
		case 2 ... 8:
			rx8 = spi->rx;
			*rx8 = data;
			spi->rx_todo--;
			spi->rx++;
			break;
		case 16:
			rx16 = (u16 *) spi->rx;
			*rx16 = data;
			spi->rx_todo -= 2;
			spi->rx += 2;
			break;
		case 32:
			rx32 = (u32 *) spi->rx;
			*rx32 = data;
			spi->rx_todo -= 4;
			spi->rx += 4;
			break;
		default:
			/* Unsupported word size: drop the word */
			WARN_ON(1);
			break;
		}

		rx_fill--;
	}
}
 563
/*
 * Drain the RX FIFO during an RX-only (half-duplex) transfer, where
 * the FIFO always holds 32-bit words regardless of bits_per_word.
 */
static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
{
	u32 data, *rx32;
	u8 *rx8;
	unsigned int rxbv, shift;
	unsigned int rx_fill = rx_fifo_level(spi);

	/*
	 * In RX-only mode the bits per word value is ignored by HW. A value
	 * of 32 is used instead. Thus all 4 bytes per FIFO must be read.
	 * If remaining RX bytes are less than 4, the FIFO must be read
	 * differently. The amount of received and valid bytes is indicated
	 * by STAT.RXBV register value.
	 */
	while (rx_fill) {
		if (spi->rx_todo < 4)  {
			/* Partial word: STAT.RXBV gives the valid byte count */
			rxbv = (lantiq_ssc_readl(spi, LTQ_SPI_STAT) &
				LTQ_SPI_STAT_RXBV_M) >> LTQ_SPI_STAT_RXBV_S;
			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);

			/* Valid bytes sit in the most significant positions */
			shift = (rxbv - 1) * 8;
			rx8 = spi->rx;

			while (rxbv) {
				*rx8++ = (data >> shift) & 0xFF;
				rxbv--;
				shift -= 8;
				spi->rx_todo--;
				spi->rx++;
			}
		} else {
			/* Full 32-bit word: copy all 4 bytes at once */
			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
			rx32 = (u32 *) spi->rx;

			*rx32++ = data;
			spi->rx_todo -= 4;
			spi->rx += 4;
		}
		rx_fill--;
	}
}
 605
 606static void rx_request(struct lantiq_ssc_spi *spi)
 607{
 608	unsigned int rxreq, rxreq_max;
 609
 610	/*
 611	 * To avoid receive overflows at high clocks it is better to request
 612	 * only the amount of bytes that fits into all FIFOs. This value
 613	 * depends on the FIFO size implemented in hardware.
 614	 */
 615	rxreq = spi->rx_todo;
 616	rxreq_max = spi->rx_fifo_size * 4;
 617	if (rxreq > rxreq_max)
 618		rxreq = rxreq_max;
 619
 620	lantiq_ssc_writel(spi, rxreq, LTQ_SPI_RXREQ);
 621}
 622
/*
 * TX/RX interrupt handler: keep the FIFOs moving. When the transfer's
 * data has been fully handled, queue the busy-poll work item that
 * waits for the shift register to drain and finalizes the transfer.
 */
static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
{
	struct lantiq_ssc_spi *spi = data;
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);

	spin_lock(&spi->lock);
	/* Some SoCs (irq_ack set) require an explicit write-back to ack */
	if (hwcfg->irq_ack)
		lantiq_ssc_writel(spi, val, hwcfg->irncr);

	if (spi->tx) {
		/* Full duplex: read back what the last FIFO fill clocked in */
		if (spi->rx && spi->rx_todo)
			rx_fifo_read_full_duplex(spi);

		if (spi->tx_todo)
			tx_fifo_write(spi);
		else if (!tx_fifo_level(spi))
			/* Nothing left to send and FIFO empty: done */
			goto completed;
	} else if (spi->rx) {
		/* RX-only transfer */
		if (spi->rx_todo) {
			rx_fifo_read_half_duplex(spi);

			if (spi->rx_todo)
				rx_request(spi);
			else
				goto completed;
		} else {
			goto completed;
		}
	}

	spin_unlock(&spi->lock);
	return IRQ_HANDLED;

completed:
	queue_work(spi->wq, &spi->work);
	spin_unlock(&spi->lock);

	return IRQ_HANDLED;
}
 663
/*
 * Error interrupt handler: log each error condition found in STAT,
 * clear the error flags and mark the current message as failed so the
 * core can retry it. Returns IRQ_NONE when no error bit is set.
 */
static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
{
	struct lantiq_ssc_spi *spi = data;
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);

	if (!(stat & LTQ_SPI_STAT_ERRORS))
		return IRQ_NONE;

	spin_lock(&spi->lock);
	/* Some SoCs (irq_ack set) require an explicit write-back to ack */
	if (hwcfg->irq_ack)
		lantiq_ssc_writel(spi, val, hwcfg->irncr);

	if (stat & LTQ_SPI_STAT_RUE)
		dev_err(spi->dev, "receive underflow error\n");
	if (stat & LTQ_SPI_STAT_TUE)
		dev_err(spi->dev, "transmit underflow error\n");
	if (stat & LTQ_SPI_STAT_AE)
		dev_err(spi->dev, "abort error\n");
	if (stat & LTQ_SPI_STAT_RE)
		dev_err(spi->dev, "receive overflow error\n");
	if (stat & LTQ_SPI_STAT_TE)
		dev_err(spi->dev, "transmit overflow error\n");
	if (stat & LTQ_SPI_STAT_ME)
		dev_err(spi->dev, "mode error\n");

	/* Clear error flags */
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);

	/* set bad status so it can be retried */
	if (spi->master->cur_msg)
		spi->master->cur_msg->status = -EIO;
	/* Let the work item finalize the failed transfer */
	queue_work(spi->wq, &spi->work);
	spin_unlock(&spi->lock);

	return IRQ_HANDLED;
}
 702
 703static irqreturn_t intel_lgm_ssc_isr(int irq, void *data)
 704{
 705	struct lantiq_ssc_spi *spi = data;
 706	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 707	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
 708
 709	if (!(val & LTQ_SPI_IRNEN_ALL))
 710		return IRQ_NONE;
 711
 712	if (val & LTQ_SPI_IRNEN_E)
 713		return lantiq_ssc_err_interrupt(irq, data);
 714
 715	if ((val & hwcfg->irnen_t) || (val & hwcfg->irnen_r))
 716		return lantiq_ssc_xmit_interrupt(irq, data);
 717
 718	return IRQ_HANDLED;
 719}
 720
/*
 * Start a transfer under the controller lock: record the TX/RX buffers
 * and remaining byte counts, prime the TX FIFO, and in RX-only mode
 * start the shift clock via an RX request. Returns the transfer length
 * (a positive value tells the SPI core the transfer is in flight).
 */
static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
			  struct spi_transfer *t)
{
	unsigned long flags;

	spin_lock_irqsave(&spi->lock, flags);

	spi->tx = t->tx_buf;
	spi->rx = t->rx_buf;

	if (t->tx_buf) {
		spi->tx_todo = t->len;

		/* initially fill TX FIFO */
		tx_fifo_write(spi);
	}

	if (spi->rx) {
		spi->rx_todo = t->len;

		/* start shift clock in RX-only mode */
		if (!spi->tx)
			rx_request(spi);
	}

	spin_unlock_irqrestore(&spi->lock, flags);

	return t->len;
}
 750
 751/*
 752 * The driver only gets an interrupt when the FIFO is empty, but there
 753 * is an additional shift register from which the data is written to
 754 * the wire. We get the last interrupt when the controller starts to
 755 * write the last word to the wire, not when it is finished. Do busy
 756 * waiting till it finishes.
 757 */
/*
 * Work item queued when a transfer's data has been handed to the
 * hardware: poll STAT.BSY until the shift register finishes, then
 * finalize the transfer. If the bus stays busy past the computed
 * timeout, mark the message -EIO and finalize anyway.
 */
static void lantiq_ssc_bussy_work(struct work_struct *work)
{
	struct lantiq_ssc_spi *spi;
	/* 8 bits at speed_hz, in milliseconds: 8 * 1000 / speed_hz */
	unsigned long long timeout = 8LL * 1000LL;
	unsigned long end;

	spi = container_of(work, typeof(*spi), work);

	do_div(timeout, spi->speed_hz);
	/* double it and add 100 ms of slack */
	timeout += timeout + 100; /* some tolerance */

	end = jiffies + msecs_to_jiffies(timeout);
	do {
		u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);

		if (!(stat & LTQ_SPI_STAT_BSY)) {
			spi_finalize_current_transfer(spi->master);
			return;
		}

		cond_resched();
	} while (!time_after_eq(jiffies, end));

	/* Bus never went idle: report the timeout as an I/O error */
	if (spi->master->cur_msg)
		spi->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(spi->master);
}
 785
 786static void lantiq_ssc_handle_err(struct spi_master *master,
 787				  struct spi_message *message)
 788{
 789	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
 790
 791	/* flush FIFOs on timeout */
 792	rx_fifo_flush(spi);
 793	tx_fifo_flush(spi);
 794}
 795
 796static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
 797{
 798	struct lantiq_ssc_spi *spi = spi_master_get_devdata(spidev->master);
 799	unsigned int cs = spidev->chip_select;
 800	u32 fgpo;
 801
 802	if (!!(spidev->mode & SPI_CS_HIGH) == enable)
 803		fgpo = (1 << (cs - spi->base_cs));
 804	else
 805		fgpo = (1 << (cs - spi->base_cs + LTQ_SPI_FGPO_SETOUTN_S));
 806
 807	lantiq_ssc_writel(spi, fgpo, LTQ_SPI_FPGO);
 808}
 809
 810static int lantiq_ssc_transfer_one(struct spi_master *master,
 811				   struct spi_device *spidev,
 812				   struct spi_transfer *t)
 813{
 814	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
 815
 816	hw_setup_transfer(spi, spidev, t);
 817
 818	return transfer_start(spi, spidev, t);
 819}
 820
 821static int intel_lgm_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
 822{
 823	int irq;
 824
 825	irq = platform_get_irq(pdev, 0);
 826	if (irq < 0)
 827		return irq;
 828
 829	return devm_request_irq(&pdev->dev, irq, intel_lgm_ssc_isr, 0, "spi", spi);
 830}
 831
 832static int lantiq_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
 833{
 834	int irq, err;
 835
 836	irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
 837	if (irq < 0)
 838		return irq;
 839
 840	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
 841			       0, LTQ_SPI_RX_IRQ_NAME, spi);
 842	if (err)
 843		return err;
 844
 845	irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
 846	if (irq < 0)
 847		return irq;
 848
 849	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
 850			       0, LTQ_SPI_TX_IRQ_NAME, spi);
 851
 852	if (err)
 853		return err;
 854
 855	irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
 856	if (irq < 0)
 857		return irq;
 858
 859	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_err_interrupt,
 860			       0, LTQ_SPI_ERR_IRQ_NAME, spi);
 861	return err;
 862}
 863
/*
 * Per-SoC configuration tables: IRQ setup hook, SoC-specific TX/RX
 * interrupt-enable bits, IRN register offsets, FIFO fill-level mask
 * and whether interrupts must be acknowledged by writing IRNCR back.
 */

/* Lantiq XWAY (e.g. Amazon-SE): 6-bit FIFO levels, no IRQ ack */
static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
	.cfg_irq	= lantiq_cfg_irq,
	.irnen_r	= LTQ_SPI_IRNEN_R_XWAY,
	.irnen_t	= LTQ_SPI_IRNEN_T_XWAY,
	.irnicr		= 0xF8,
	.irncr		= 0xFC,
	.fifo_size_mask	= GENMASK(5, 0),
	.irq_ack	= false,
};

/* Lantiq XRX/Falcon: R/T interrupt-enable bits swapped vs. XWAY */
static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
	.cfg_irq	= lantiq_cfg_irq,
	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
	.irnicr		= 0xF8,
	.irncr		= 0xFC,
	.fifo_size_mask	= GENMASK(5, 0),
	.irq_ack	= false,
};

/* Intel LGM: single IRQ line, swapped IRN offsets, 8-bit FIFO levels */
static const struct lantiq_ssc_hwcfg intel_ssc_lgm = {
	.cfg_irq	= intel_lgm_cfg_irq,
	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
	.irnicr		= 0xFC,
	.irncr		= 0xF8,
	.fifo_size_mask	= GENMASK(7, 0),
	.irq_ack	= true,
};
 893
/* Device-tree compatible strings mapped to their per-SoC hwcfg. */
static const struct of_device_id lantiq_ssc_match[] = {
	{ .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
	{ .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
	{ .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
	{ .compatible = "intel,lgm-spi", .data = &intel_ssc_lgm, },
	{},
};
MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
 902
/*
 * Probe: allocate the SPI master, map registers, request IRQs via the
 * per-SoC hook, enable clocks, read FIFO geometry from the ID
 * register, initialize the hardware and register the master.
 *
 * Error unwinding is ordered: workqueue -> fpi_clk -> spi_clk ->
 * master reference. Note fpi_clk is obtained with plain clk_get (not
 * devm), so it must be put explicitly here and in remove().
 */
static int lantiq_ssc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct lantiq_ssc_spi *spi;
	const struct lantiq_ssc_hwcfg *hwcfg;
	u32 id, supports_dma, revision;
	unsigned int num_cs;
	int err;

	hwcfg = of_device_get_match_data(dev);

	master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
	if (!master)
		return -ENOMEM;

	spi = spi_master_get_devdata(master);
	spi->master = master;
	spi->dev = dev;
	spi->hwcfg = hwcfg;
	platform_set_drvdata(pdev, spi);
	spi->regbase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi->regbase)) {
		err = PTR_ERR(spi->regbase);
		goto err_master_put;
	}

	/* SoC-specific IRQ wiring (single line on LGM, three on XWAY/XRX) */
	err = hwcfg->cfg_irq(pdev, spi);
	if (err)
		goto err_master_put;

	spi->spi_clk = devm_clk_get(dev, "gate");
	if (IS_ERR(spi->spi_clk)) {
		err = PTR_ERR(spi->spi_clk);
		goto err_master_put;
	}
	err = clk_prepare_enable(spi->spi_clk);
	if (err)
		goto err_master_put;

	/*
	 * Use the old clk_get_fpi() function on Lantiq platform, till it
	 * supports common clk.
	 */
#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
	spi->fpi_clk = clk_get_fpi();
#else
	spi->fpi_clk = clk_get(dev, "freq");
#endif
	if (IS_ERR(spi->fpi_clk)) {
		err = PTR_ERR(spi->fpi_clk);
		goto err_clk_disable;
	}

	/* DT-overridable defaults: 8 chipselects starting at CS 1 */
	num_cs = 8;
	of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);

	spi->base_cs = 1;
	of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);

	spin_lock_init(&spi->lock);
	spi->bits_per_word = 8;
	spi->speed_hz = 0;

	master->dev.of_node = pdev->dev.of_node;
	master->num_chipselect = num_cs;
	master->use_gpio_descriptors = true;
	master->setup = lantiq_ssc_setup;
	master->set_cs = lantiq_ssc_set_cs;
	master->handle_err = lantiq_ssc_handle_err;
	master->prepare_message = lantiq_ssc_prepare_message;
	master->unprepare_message = lantiq_ssc_unprepare_message;
	master->transfer_one = lantiq_ssc_transfer_one;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
				SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
				     SPI_BPW_MASK(16) | SPI_BPW_MASK(32);

	/* Ordered workqueue for the busy-poll completion work item */
	spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
	if (!spi->wq) {
		err = -ENOMEM;
		goto err_clk_put;
	}
	INIT_WORK(&spi->work, lantiq_ssc_bussy_work);

	/* FIFO sizes and capabilities are read from the hardware ID */
	id = lantiq_ssc_readl(spi, LTQ_SPI_ID);
	spi->tx_fifo_size = (id >> LTQ_SPI_ID_TXFS_S) & hwcfg->fifo_size_mask;
	spi->rx_fifo_size = (id >> LTQ_SPI_ID_RXFS_S) & hwcfg->fifo_size_mask;
	supports_dma = (id & LTQ_SPI_ID_CFG_M) >> LTQ_SPI_ID_CFG_S;
	revision = id & LTQ_SPI_ID_REV_M;

	lantiq_ssc_hw_init(spi);

	dev_info(dev,
		"Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
		revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);

	err = devm_spi_register_master(dev, master);
	if (err) {
		dev_err(dev, "failed to register spi_master\n");
		goto err_wq_destroy;
	}

	return 0;

err_wq_destroy:
	destroy_workqueue(spi->wq);
err_clk_put:
	clk_put(spi->fpi_clk);
err_clk_disable:
	clk_disable_unprepare(spi->spi_clk);
err_master_put:
	spi_master_put(master);

	return err;
}
1019
/*
 * Remove: mask interrupts, stop the module clock, drain the FIFOs and
 * drop the controller back into config mode before releasing the
 * workqueue and clocks (mirrors the probe acquisition order).
 */
static int lantiq_ssc_remove(struct platform_device *pdev)
{
	struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);

	/* Disable all interrupt sources and the module clock */
	lantiq_ssc_writel(spi, 0, LTQ_SPI_IRNEN);
	lantiq_ssc_writel(spi, 0, LTQ_SPI_CLC);
	rx_fifo_flush(spi);
	tx_fifo_flush(spi);
	hw_enter_config_mode(spi);

	destroy_workqueue(spi->wq);
	clk_disable_unprepare(spi->spi_clk);
	clk_put(spi->fpi_clk);

	return 0;
}
1036
/* Platform driver glue; matching is device-tree based. */
static struct platform_driver lantiq_ssc_driver = {
	.probe = lantiq_ssc_probe,
	.remove = lantiq_ssc_remove,
	.driver = {
		.name = "spi-lantiq-ssc",
		.of_match_table = lantiq_ssc_match,
	},
};
module_platform_driver(lantiq_ssc_driver);

MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:spi-lantiq-ssc");
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
   4 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/of.h>
  10#include <linux/platform_device.h>
  11#include <linux/clk.h>
  12#include <linux/io.h>
  13#include <linux/delay.h>
  14#include <linux/interrupt.h>
  15#include <linux/sched.h>
  16#include <linux/completion.h>
  17#include <linux/spinlock.h>
  18#include <linux/err.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/spi/spi.h>
  21
  22#ifdef CONFIG_LANTIQ
  23#include <lantiq_soc.h>
  24#endif
  25
  26#define LTQ_SPI_RX_IRQ_NAME	"spi_rx"
  27#define LTQ_SPI_TX_IRQ_NAME	"spi_tx"
  28#define LTQ_SPI_ERR_IRQ_NAME	"spi_err"
  29#define LTQ_SPI_FRM_IRQ_NAME	"spi_frm"
  30
  31#define LTQ_SPI_CLC		0x00
  32#define LTQ_SPI_PISEL		0x04
  33#define LTQ_SPI_ID		0x08
  34#define LTQ_SPI_CON		0x10
  35#define LTQ_SPI_STAT		0x14
  36#define LTQ_SPI_WHBSTATE	0x18
  37#define LTQ_SPI_TB		0x20
  38#define LTQ_SPI_RB		0x24
  39#define LTQ_SPI_RXFCON		0x30
  40#define LTQ_SPI_TXFCON		0x34
  41#define LTQ_SPI_FSTAT		0x38
  42#define LTQ_SPI_BRT		0x40
  43#define LTQ_SPI_BRSTAT		0x44
  44#define LTQ_SPI_SFCON		0x60
  45#define LTQ_SPI_SFSTAT		0x64
  46#define LTQ_SPI_GPOCON		0x70
  47#define LTQ_SPI_GPOSTAT		0x74
  48#define LTQ_SPI_FPGO		0x78
  49#define LTQ_SPI_RXREQ		0x80
  50#define LTQ_SPI_RXCNT		0x84
  51#define LTQ_SPI_DMACON		0xec
  52#define LTQ_SPI_IRNEN		0xf4
  53
  54#define LTQ_SPI_CLC_SMC_S	16	/* Clock divider for sleep mode */
  55#define LTQ_SPI_CLC_SMC_M	(0xFF << LTQ_SPI_CLC_SMC_S)
  56#define LTQ_SPI_CLC_RMC_S	8	/* Clock divider for normal run mode */
  57#define LTQ_SPI_CLC_RMC_M	(0xFF << LTQ_SPI_CLC_RMC_S)
  58#define LTQ_SPI_CLC_DISS	BIT(1)	/* Disable status bit */
  59#define LTQ_SPI_CLC_DISR	BIT(0)	/* Disable request bit */
  60
  61#define LTQ_SPI_ID_TXFS_S	24	/* Implemented TX FIFO size */
  62#define LTQ_SPI_ID_RXFS_S	16	/* Implemented RX FIFO size */
  63#define LTQ_SPI_ID_MOD_S	8	/* Module ID */
  64#define LTQ_SPI_ID_MOD_M	(0xff << LTQ_SPI_ID_MOD_S)
  65#define LTQ_SPI_ID_CFG_S	5	/* DMA interface support */
  66#define LTQ_SPI_ID_CFG_M	(1 << LTQ_SPI_ID_CFG_S)
  67#define LTQ_SPI_ID_REV_M	0x1F	/* Hardware revision number */
  68
  69#define LTQ_SPI_CON_BM_S	16	/* Data width selection */
  70#define LTQ_SPI_CON_BM_M	(0x1F << LTQ_SPI_CON_BM_S)
  71#define LTQ_SPI_CON_EM		BIT(24)	/* Echo mode */
  72#define LTQ_SPI_CON_IDLE	BIT(23)	/* Idle bit value */
  73#define LTQ_SPI_CON_ENBV	BIT(22)	/* Enable byte valid control */
  74#define LTQ_SPI_CON_RUEN	BIT(12)	/* Receive underflow error enable */
  75#define LTQ_SPI_CON_TUEN	BIT(11)	/* Transmit underflow error enable */
  76#define LTQ_SPI_CON_AEN		BIT(10)	/* Abort error enable */
  77#define LTQ_SPI_CON_REN		BIT(9)	/* Receive overflow error enable */
  78#define LTQ_SPI_CON_TEN		BIT(8)	/* Transmit overflow error enable */
  79#define LTQ_SPI_CON_LB		BIT(7)	/* Loopback control */
  80#define LTQ_SPI_CON_PO		BIT(6)	/* Clock polarity control */
  81#define LTQ_SPI_CON_PH		BIT(5)	/* Clock phase control */
  82#define LTQ_SPI_CON_HB		BIT(4)	/* Heading control */
  83#define LTQ_SPI_CON_RXOFF	BIT(1)	/* Switch receiver off */
  84#define LTQ_SPI_CON_TXOFF	BIT(0)	/* Switch transmitter off */
  85
  86#define LTQ_SPI_STAT_RXBV_S	28
  87#define LTQ_SPI_STAT_RXBV_M	(0x7 << LTQ_SPI_STAT_RXBV_S)
  88#define LTQ_SPI_STAT_BSY	BIT(13)	/* Busy flag */
  89#define LTQ_SPI_STAT_RUE	BIT(12)	/* Receive underflow error flag */
  90#define LTQ_SPI_STAT_TUE	BIT(11)	/* Transmit underflow error flag */
  91#define LTQ_SPI_STAT_AE		BIT(10)	/* Abort error flag */
  92#define LTQ_SPI_STAT_RE		BIT(9)	/* Receive error flag */
  93#define LTQ_SPI_STAT_TE		BIT(8)	/* Transmit error flag */
  94#define LTQ_SPI_STAT_ME		BIT(7)	/* Mode error flag */
  95#define LTQ_SPI_STAT_MS		BIT(1)	/* Host/target select bit */
  96#define LTQ_SPI_STAT_EN		BIT(0)	/* Enable bit */
  97#define LTQ_SPI_STAT_ERRORS	(LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
  98				 LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
  99				 LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)
 100
 101#define LTQ_SPI_WHBSTATE_SETTUE	BIT(15)	/* Set transmit underflow error flag */
 102#define LTQ_SPI_WHBSTATE_SETAE	BIT(14)	/* Set abort error flag */
 103#define LTQ_SPI_WHBSTATE_SETRE	BIT(13)	/* Set receive error flag */
 104#define LTQ_SPI_WHBSTATE_SETTE	BIT(12)	/* Set transmit error flag */
 105#define LTQ_SPI_WHBSTATE_CLRTUE	BIT(11)	/* Clear transmit underflow error flag */
 106#define LTQ_SPI_WHBSTATE_CLRAE	BIT(10)	/* Clear abort error flag */
 107#define LTQ_SPI_WHBSTATE_CLRRE	BIT(9)	/* Clear receive error flag */
 108#define LTQ_SPI_WHBSTATE_CLRTE	BIT(8)	/* Clear transmit error flag */
 109#define LTQ_SPI_WHBSTATE_SETME	BIT(7)	/* Set mode error flag */
 110#define LTQ_SPI_WHBSTATE_CLRME	BIT(6)	/* Clear mode error flag */
 111#define LTQ_SPI_WHBSTATE_SETRUE	BIT(5)	/* Set receive underflow error flag */
 112#define LTQ_SPI_WHBSTATE_CLRRUE	BIT(4)	/* Clear receive underflow error flag */
 113#define LTQ_SPI_WHBSTATE_SETMS	BIT(3)	/* Set host select bit */
 114#define LTQ_SPI_WHBSTATE_CLRMS	BIT(2)	/* Clear host select bit */
 115#define LTQ_SPI_WHBSTATE_SETEN	BIT(1)	/* Set enable bit (operational mode) */
#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
 117#define LTQ_SPI_WHBSTATE_CLR_ERRORS	(LTQ_SPI_WHBSTATE_CLRRUE | \
 118					 LTQ_SPI_WHBSTATE_CLRME | \
 119					 LTQ_SPI_WHBSTATE_CLRTE | \
 120					 LTQ_SPI_WHBSTATE_CLRRE | \
 121					 LTQ_SPI_WHBSTATE_CLRAE | \
 122					 LTQ_SPI_WHBSTATE_CLRTUE)
 123
 124#define LTQ_SPI_RXFCON_RXFITL_S	8	/* FIFO interrupt trigger level */
 125#define LTQ_SPI_RXFCON_RXFLU	BIT(1)	/* FIFO flush */
 126#define LTQ_SPI_RXFCON_RXFEN	BIT(0)	/* FIFO enable */
 127
 128#define LTQ_SPI_TXFCON_TXFITL_S	8	/* FIFO interrupt trigger level */
 129#define LTQ_SPI_TXFCON_TXFLU	BIT(1)	/* FIFO flush */
 130#define LTQ_SPI_TXFCON_TXFEN	BIT(0)	/* FIFO enable */
 131
 132#define LTQ_SPI_FSTAT_RXFFL_S	0
 133#define LTQ_SPI_FSTAT_TXFFL_S	8
 134
 135#define LTQ_SPI_GPOCON_ISCSBN_S	8
 136#define LTQ_SPI_GPOCON_INVOUTN_S	0
 137
 138#define LTQ_SPI_FGPO_SETOUTN_S	8
 139#define LTQ_SPI_FGPO_CLROUTN_S	0
 140
 141#define LTQ_SPI_RXREQ_RXCNT_M	0xFFFF	/* Receive count value */
#define LTQ_SPI_RXCNT_TODO_M	0xFFFF	/* Receive to-do value */
 143
 144#define LTQ_SPI_IRNEN_TFI	BIT(4)	/* TX finished interrupt */
 145#define LTQ_SPI_IRNEN_F		BIT(3)	/* Frame end interrupt request */
 146#define LTQ_SPI_IRNEN_E		BIT(2)	/* Error end interrupt request */
 147#define LTQ_SPI_IRNEN_T_XWAY	BIT(1)	/* Transmit end interrupt request */
 148#define LTQ_SPI_IRNEN_R_XWAY	BIT(0)	/* Receive end interrupt request */
#define LTQ_SPI_IRNEN_R_XRX	BIT(1)	/* Receive end interrupt request */
#define LTQ_SPI_IRNEN_T_XRX	BIT(0)	/* Transmit end interrupt request */
 151#define LTQ_SPI_IRNEN_ALL	0x1F
 152
 153struct lantiq_ssc_spi;
 154
/* Per-SoC configuration: IRQ wiring, IRN register offsets, FIFO geometry */
struct lantiq_ssc_hwcfg {
	int (*cfg_irq)(struct platform_device *pdev, struct lantiq_ssc_spi *spi);	/* request SoC-specific IRQ line(s) */
	unsigned int	irnen_r;	/* RX end bit in IRNEN/IRNCR */
	unsigned int	irnen_t;	/* TX end bit in IRNEN/IRNCR */
	unsigned int	irncr;		/* IRN capture register offset */
	unsigned int	irnicr;		/* IRN interrupt capture register offset */
	bool		irq_ack;	/* true if IRNCR must be written back to ack */
	u32		fifo_size_mask;	/* width mask of the FIFO fill-level fields */
};
 164
/* Driver instance state, allocated alongside the spi_controller */
struct lantiq_ssc_spi {
	struct spi_controller		*host;
	struct device			*dev;
	void __iomem			*regbase;	/* mapped SSC register block */
	struct clk			*spi_clk;	/* module gate clock */
	struct clk			*fpi_clk;	/* FPI bus clock, baud rate source */
	const struct lantiq_ssc_hwcfg	*hwcfg;

	spinlock_t			lock;		/* serializes FIFO/IRQ access */
	struct workqueue_struct		*wq;		/* runs lantiq_ssc_bussy_work */
	struct work_struct		work;

	/* Current transfer progress; guarded by @lock */
	const u8			*tx;		/* next TX byte, NULL if RX-only */
	u8				*rx;		/* next RX byte, NULL if TX-only */
	unsigned int			tx_todo;	/* TX bytes remaining */
	unsigned int			rx_todo;	/* RX bytes remaining */
	unsigned int			bits_per_word;
	unsigned int			speed_hz;
	unsigned int			tx_fifo_size;	/* in FIFO entries */
	unsigned int			rx_fifo_size;	/* in FIFO entries */
	unsigned int			base_cs;	/* first hardware CS number */
	unsigned int			fdx_tx_level;	/* words queued in last TX burst */
};
 188
/* Read a 32-bit SSC register (raw, no byte swap or barrier). */
static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
{
	return __raw_readl(spi->regbase + reg);
}
 193
/* Write a 32-bit SSC register (raw, no byte swap or barrier). */
static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
			      u32 reg)
{
	__raw_writel(val, spi->regbase + reg);
}
 199
/*
 * Read-modify-write an SSC register: clear the @clr bits, then set the
 * @set bits. Not atomic; callers serialize via spi->lock where needed.
 */
static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
			     u32 set, u32 reg)
{
	u32 val = __raw_readl(spi->regbase + reg);

	val &= ~clr;
	val |= set;
	__raw_writel(val, spi->regbase + reg);
}
 209
/* Current TX FIFO fill level in entries, from the FSTAT.TXFFL field. */
static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
{
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);

	return (fstat >> LTQ_SPI_FSTAT_TXFFL_S) & hwcfg->fifo_size_mask;
}
 217
/* Current RX FIFO fill level in entries, from the FSTAT.RXFFL field. */
static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
{
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);

	return (fstat >> LTQ_SPI_FSTAT_RXFFL_S) & hwcfg->fifo_size_mask;
}
 225
 226static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
 227{
 228	return spi->tx_fifo_size - tx_fifo_level(spi);
 229}
 230
/*
 * Enable and flush the RX FIFO; the interrupt trigger level is set to
 * the full FIFO size so the RX IRQ fires as late as possible.
 */
static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
{
	u32 val = spi->rx_fifo_size << LTQ_SPI_RXFCON_RXFITL_S;

	val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
	lantiq_ssc_writel(spi, val, LTQ_SPI_RXFCON);
}
 238
/*
 * Enable and flush the TX FIFO; trigger level 1 so the TX IRQ fires as
 * soon as the FIFO drains.
 */
static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
{
	u32 val = 1 << LTQ_SPI_TXFCON_TXFITL_S;

	val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
	lantiq_ssc_writel(spi, val, LTQ_SPI_TXFCON);
}
 246
/* Discard all data currently held in the RX FIFO. */
static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
}
 251
/* Discard all data currently held in the TX FIFO. */
static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
}
 256
/* Clear the enable bit: controller registers become writable (config mode). */
static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
}
 261
/* Set the enable bit: controller enters operational (active) mode. */
static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
}
 266
/*
 * Program the baud rate register (BRT) for the requested maximum speed.
 * Must be called with the controller in config mode.
 */
static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
			      unsigned int max_speed_hz)
{
	u32 spi_clk, brt;

	/*
	 * SPI module clock is derived from FPI bus clock dependent on
	 * divider value in CLC.RMS which is always set to 1.
	 *
	 *                 f_SPI
	 * baudrate = --------------
	 *             2 * (BR + 1)
	 */
	spi_clk = clk_get_rate(spi->fpi_clk) / 2;

	/* Clamp to the fastest rate the hardware can produce (BR = 0) */
	if (max_speed_hz > spi_clk)
		brt = 0;
	else
		brt = spi_clk / max_speed_hz - 1;

	/* BRT is a 16-bit register; saturate at the slowest rate */
	if (brt > 0xFFFF)
		brt = 0xFFFF;

	dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
		spi_clk, max_speed_hz, brt);

	lantiq_ssc_writel(spi, brt, LTQ_SPI_BRT);
}
 295
/*
 * Program the word width (CON.BM field). Must be called with the
 * controller in config mode.
 */
static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
				   unsigned int bits_per_word)
{
	u32 bm;

	/* CON.BM value = bits_per_word - 1 */
	bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_S;

	lantiq_ssc_maskl(spi, LTQ_SPI_CON_BM_M, bm, LTQ_SPI_CON);
}
 306
/*
 * Translate the SPI mode flags (CPOL/CPHA/LSB_FIRST/LOOP) into the
 * controller's CON register bits and apply them in one read-modify-write.
 */
static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
				unsigned int mode)
{
	u32 con_set = 0, con_clr = 0;

	/*
	 * SPI mode mapping in CON register:
	 * Mode CPOL CPHA CON.PO CON.PH
	 *  0    0    0      0      1
	 *  1    0    1      0      0
	 *  2    1    0      1      1
	 *  3    1    1      1      0
	 */
	if (mode & SPI_CPHA)
		con_clr |= LTQ_SPI_CON_PH;
	else
		con_set |= LTQ_SPI_CON_PH;

	if (mode & SPI_CPOL)
		con_set |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
	else
		con_clr |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;

	/* Set heading control */
	if (mode & SPI_LSB_FIRST)
		con_clr |= LTQ_SPI_CON_HB;
	else
		con_set |= LTQ_SPI_CON_HB;

	/* Set loopback mode */
	if (mode & SPI_LOOP)
		con_set |= LTQ_SPI_CON_LB;
	else
		con_clr |= LTQ_SPI_CON_LB;

	lantiq_ssc_maskl(spi, con_clr, con_set, LTQ_SPI_CON);
}
 344
/*
 * Bring the controller into a known state: host mode, errors cleared,
 * FIFOs enabled and flushed, TX/RX end plus error interrupts enabled.
 */
static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
{
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;

	/*
	 * Set clock divider for run mode to 1 to
	 * run at same frequency as FPI bus
	 */
	lantiq_ssc_writel(spi, 1 << LTQ_SPI_CLC_RMC_S, LTQ_SPI_CLC);

	/* Put controller into config mode */
	hw_enter_config_mode(spi);

	/* Clear error flags */
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);

	/* Enable error checking, disable TX/RX */
	lantiq_ssc_writel(spi, LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
		LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN | LTQ_SPI_CON_TXOFF |
		LTQ_SPI_CON_RXOFF, LTQ_SPI_CON);

	/* Setup default SPI mode */
	hw_setup_bits_per_word(spi, spi->bits_per_word);
	hw_setup_clock_mode(spi, SPI_MODE_0);

	/* Enable host mode and clear error flags */
	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETMS |
			       LTQ_SPI_WHBSTATE_CLR_ERRORS,
			       LTQ_SPI_WHBSTATE);

	/* Reset GPIO/CS registers */
	lantiq_ssc_writel(spi, 0, LTQ_SPI_GPOCON);
	lantiq_ssc_writel(spi, 0xFF00, LTQ_SPI_FPGO);

	/* Enable and flush FIFOs */
	rx_fifo_reset(spi);
	tx_fifo_reset(spi);

	/* Enable interrupts */
	lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r |
			  LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
}
 387
/*
 * Per-device setup callback: configure the internal GPO pin as chip
 * select for devices that do not use a GPIO descriptor for CS.
 * Returns 0 on success, -EINVAL for an out-of-range chipselect.
 */
static int lantiq_ssc_setup(struct spi_device *spidev)
{
	struct spi_controller *host = spidev->controller;
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
	unsigned int cs = spi_get_chipselect(spidev, 0);
	u32 gpocon;

	/* GPIOs are used for CS */
	if (spi_get_csgpiod(spidev, 0))
		return 0;

	dev_dbg(spi->dev, "using internal chipselect %u\n", cs);

	if (cs < spi->base_cs) {
		dev_err(spi->dev,
			"chipselect %i too small (min %i)\n", cs, spi->base_cs);
		return -EINVAL;
	}

	/* set GPO pin to CS mode */
	gpocon = 1 << ((cs - spi->base_cs) + LTQ_SPI_GPOCON_ISCSBN_S);

	/* invert GPO pin */
	if (spidev->mode & SPI_CS_HIGH)
		gpocon |= 1 << (cs - spi->base_cs);

	lantiq_ssc_maskl(spi, 0, gpocon, LTQ_SPI_GPOCON);

	return 0;
}
 418
/*
 * Message preparation: apply the device's clock mode. The controller
 * must pass through config mode to change CON, then re-enter active mode.
 */
static int lantiq_ssc_prepare_message(struct spi_controller *host,
				      struct spi_message *message)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	hw_enter_config_mode(spi);
	hw_setup_clock_mode(spi, message->spi->mode);
	hw_enter_active_mode(spi);

	return 0;
}
 430
/*
 * Per-transfer setup: reprogram speed/word size only when they changed
 * (requires a config-mode round trip), then gate the transmitter and
 * receiver according to which buffers the transfer provides.
 */
static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
			      struct spi_device *spidev, struct spi_transfer *t)
{
	unsigned int speed_hz = t->speed_hz;
	unsigned int bits_per_word = t->bits_per_word;
	u32 con;

	if (bits_per_word != spi->bits_per_word ||
		speed_hz != spi->speed_hz) {
		hw_enter_config_mode(spi);
		hw_setup_speed_hz(spi, speed_hz);
		hw_setup_bits_per_word(spi, bits_per_word);
		hw_enter_active_mode(spi);

		/* Cache the applied settings to skip the round trip next time */
		spi->speed_hz = speed_hz;
		spi->bits_per_word = bits_per_word;
	}

	/* Configure transmitter and receiver */
	con = lantiq_ssc_readl(spi, LTQ_SPI_CON);
	if (t->tx_buf)
		con &= ~LTQ_SPI_CON_TXOFF;
	else
		con |= LTQ_SPI_CON_TXOFF;

	if (t->rx_buf)
		con &= ~LTQ_SPI_CON_RXOFF;
	else
		con |= LTQ_SPI_CON_RXOFF;

	lantiq_ssc_writel(spi, con, LTQ_SPI_CON);
}
 463
/*
 * Message teardown: wait for the busy-poll work to finish, then switch
 * the transmitter and receiver off while the bus is idle.
 */
static int lantiq_ssc_unprepare_message(struct spi_controller *host,
					struct spi_message *message)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	flush_workqueue(spi->wq);

	/* Disable transmitter and receiver while idle */
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF,
			 LTQ_SPI_CON);

	return 0;
}
 477
/*
 * Fill the TX FIFO from spi->tx, consuming 1/2/4 bytes per FIFO word
 * depending on the configured word width. Updates tx/tx_todo and counts
 * the queued words in fdx_tx_level so full-duplex RX knows how many
 * words to expect back. Caller holds spi->lock.
 */
static void tx_fifo_write(struct lantiq_ssc_spi *spi)
{
	const u8 *tx8;
	const u16 *tx16;
	const u32 *tx32;
	u32 data;
	unsigned int tx_free = tx_fifo_free(spi);

	spi->fdx_tx_level = 0;
	while (spi->tx_todo && tx_free) {
		switch (spi->bits_per_word) {
		case 2 ... 8:
			tx8 = spi->tx;
			data = *tx8;
			spi->tx_todo--;
			spi->tx++;
			break;
		case 16:
			tx16 = (u16 *) spi->tx;
			data = *tx16;
			spi->tx_todo -= 2;
			spi->tx += 2;
			break;
		case 32:
			tx32 = (u32 *) spi->tx;
			data = *tx32;
			spi->tx_todo -= 4;
			spi->tx += 4;
			break;
		default:
			/* Word widths are restricted by bits_per_word_mask */
			WARN_ON(1);
			data = 0;
			break;
		}

		lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
		tx_free--;
		spi->fdx_tx_level++;
	}
}
 518
/*
 * Drain the RX FIFO during a full-duplex transfer into spi->rx. One RX
 * word is expected for every TX word queued by the preceding
 * tx_fifo_write() (fdx_tx_level). Caller holds spi->lock.
 */
static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
{
	u8 *rx8;
	u16 *rx16;
	u32 *rx32;
	u32 data;
	unsigned int rx_fill = rx_fifo_level(spi);

	/*
	 * Wait until all expected data to be shifted in.
	 * Otherwise, rx overrun may occur.
	 */
	while (rx_fill != spi->fdx_tx_level)
		rx_fill = rx_fifo_level(spi);

	while (rx_fill) {
		data = lantiq_ssc_readl(spi, LTQ_SPI_RB);

		switch (spi->bits_per_word) {
		case 2 ... 8:
			rx8 = spi->rx;
			*rx8 = data;
			spi->rx_todo--;
			spi->rx++;
			break;
		case 16:
			rx16 = (u16 *) spi->rx;
			*rx16 = data;
			spi->rx_todo -= 2;
			spi->rx += 2;
			break;
		case 32:
			rx32 = (u32 *) spi->rx;
			*rx32 = data;
			spi->rx_todo -= 4;
			spi->rx += 4;
			break;
		default:
			/* Word widths are restricted by bits_per_word_mask */
			WARN_ON(1);
			break;
		}

		rx_fill--;
	}
}
 564
/*
 * Drain the RX FIFO during an RX-only transfer into spi->rx.
 * Caller holds spi->lock.
 */
static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
{
	u32 data, *rx32;
	u8 *rx8;
	unsigned int rxbv, shift;
	unsigned int rx_fill = rx_fifo_level(spi);

	/*
	 * In RX-only mode the bits per word value is ignored by HW. A value
	 * of 32 is used instead. Thus all 4 bytes per FIFO must be read.
	 * If remaining RX bytes are less than 4, the FIFO must be read
	 * differently. The amount of received and valid bytes is indicated
	 * by STAT.RXBV register value.
	 */
	while (rx_fill) {
		if (spi->rx_todo < 4)  {
			rxbv = (lantiq_ssc_readl(spi, LTQ_SPI_STAT) &
				LTQ_SPI_STAT_RXBV_M) >> LTQ_SPI_STAT_RXBV_S;
			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);

			/* Valid bytes sit in the most-significant positions */
			shift = (rxbv - 1) * 8;
			rx8 = spi->rx;

			while (rxbv) {
				*rx8++ = (data >> shift) & 0xFF;
				rxbv--;
				shift -= 8;
				spi->rx_todo--;
				spi->rx++;
			}
		} else {
			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
			rx32 = (u32 *) spi->rx;

			*rx32++ = data;
			spi->rx_todo -= 4;
			spi->rx += 4;
		}
		rx_fill--;
	}
}
 606
/*
 * In RX-only mode, writing RXREQ starts the shift clock for the given
 * number of bytes. The request is capped at the FIFO capacity (4 bytes
 * per FIFO entry) to avoid overruns; the IRQ handler re-requests until
 * rx_todo is exhausted. Caller holds spi->lock.
 */
static void rx_request(struct lantiq_ssc_spi *spi)
{
	unsigned int rxreq, rxreq_max;

	/*
	 * To avoid receive overflows at high clocks it is better to request
	 * only the amount of bytes that fits into all FIFOs. This value
	 * depends on the FIFO size implemented in hardware.
	 */
	rxreq = spi->rx_todo;
	rxreq_max = spi->rx_fifo_size * 4;
	if (rxreq > rxreq_max)
		rxreq = rxreq_max;

	lantiq_ssc_writel(spi, rxreq, LTQ_SPI_RXREQ);
}
 623
/*
 * TX/RX end interrupt: advance the current transfer. When all data has
 * been queued/drained, the busy-poll work is scheduled to detect the
 * real end of the transfer (see lantiq_ssc_bussy_work).
 */
static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
{
	struct lantiq_ssc_spi *spi = data;
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);

	spin_lock(&spi->lock);
	/* Ack by writing the captured bits back (LGM variant only) */
	if (hwcfg->irq_ack)
		lantiq_ssc_writel(spi, val, hwcfg->irncr);

	if (spi->tx) {
		/* Full duplex: collect the words echoed for the last TX burst */
		if (spi->rx && spi->rx_todo)
			rx_fifo_read_full_duplex(spi);

		if (spi->tx_todo)
			tx_fifo_write(spi);
		else if (!tx_fifo_level(spi))
			goto completed;
	} else if (spi->rx) {
		/* RX-only: drain, then request the next chunk if needed */
		if (spi->rx_todo) {
			rx_fifo_read_half_duplex(spi);

			if (spi->rx_todo)
				rx_request(spi);
			else
				goto completed;
		} else {
			goto completed;
		}
	}

	spin_unlock(&spi->lock);
	return IRQ_HANDLED;

completed:
	queue_work(spi->wq, &spi->work);
	spin_unlock(&spi->lock);

	return IRQ_HANDLED;
}
 664
/*
 * Error interrupt: report and clear all error flags, mark the current
 * message as failed (-EIO) and schedule the work that finalizes the
 * transfer so the core can retry or abort.
 */
static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
{
	struct lantiq_ssc_spi *spi = data;
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);

	if (!(stat & LTQ_SPI_STAT_ERRORS))
		return IRQ_NONE;

	spin_lock(&spi->lock);
	/* Ack by writing the captured bits back (LGM variant only) */
	if (hwcfg->irq_ack)
		lantiq_ssc_writel(spi, val, hwcfg->irncr);

	if (stat & LTQ_SPI_STAT_RUE)
		dev_err(spi->dev, "receive underflow error\n");
	if (stat & LTQ_SPI_STAT_TUE)
		dev_err(spi->dev, "transmit underflow error\n");
	if (stat & LTQ_SPI_STAT_AE)
		dev_err(spi->dev, "abort error\n");
	if (stat & LTQ_SPI_STAT_RE)
		dev_err(spi->dev, "receive overflow error\n");
	if (stat & LTQ_SPI_STAT_TE)
		dev_err(spi->dev, "transmit overflow error\n");
	if (stat & LTQ_SPI_STAT_ME)
		dev_err(spi->dev, "mode error\n");

	/* Clear error flags */
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);

	/* set bad status so it can be retried */
	if (spi->host->cur_msg)
		spi->host->cur_msg->status = -EIO;
	queue_work(spi->wq, &spi->work);
	spin_unlock(&spi->lock);

	return IRQ_HANDLED;
}
 703
 704static irqreturn_t intel_lgm_ssc_isr(int irq, void *data)
 705{
 706	struct lantiq_ssc_spi *spi = data;
 707	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 708	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
 709
 710	if (!(val & LTQ_SPI_IRNEN_ALL))
 711		return IRQ_NONE;
 712
 713	if (val & LTQ_SPI_IRNEN_E)
 714		return lantiq_ssc_err_interrupt(irq, data);
 715
 716	if ((val & hwcfg->irnen_t) || (val & hwcfg->irnen_r))
 717		return lantiq_ssc_xmit_interrupt(irq, data);
 718
 719	return IRQ_HANDLED;
 720}
 721
/*
 * Kick off a transfer: record the buffers/counters, prime the TX FIFO
 * (TX or full duplex) or issue the first RX request (RX only). Further
 * progress happens in the interrupt handlers. Returns t->len so the SPI
 * core waits for spi_finalize_current_transfer().
 */
static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
			  struct spi_transfer *t)
{
	unsigned long flags;

	spin_lock_irqsave(&spi->lock, flags);

	spi->tx = t->tx_buf;
	spi->rx = t->rx_buf;

	if (t->tx_buf) {
		spi->tx_todo = t->len;

		/* initially fill TX FIFO */
		tx_fifo_write(spi);
	}

	if (spi->rx) {
		spi->rx_todo = t->len;

		/* start shift clock in RX-only mode */
		if (!spi->tx)
			rx_request(spi);
	}

	spin_unlock_irqrestore(&spi->lock, flags);

	return t->len;
}
 751
 752/*
 753 * The driver only gets an interrupt when the FIFO is empty, but there
 754 * is an additional shift register from which the data is written to
 755 * the wire. We get the last interrupt when the controller starts to
 756 * write the last word to the wire, not when it is finished. Do busy
 757 * waiting till it finishes.
 758 */
 759static void lantiq_ssc_bussy_work(struct work_struct *work)
 760{
 761	struct lantiq_ssc_spi *spi;
 762	unsigned long long timeout = 8LL * 1000LL;
 763	unsigned long end;
 764
 765	spi = container_of(work, typeof(*spi), work);
 766
 767	do_div(timeout, spi->speed_hz);
 768	timeout += timeout + 100; /* some tolerance */
 769
 770	end = jiffies + msecs_to_jiffies(timeout);
 771	do {
 772		u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
 773
 774		if (!(stat & LTQ_SPI_STAT_BSY)) {
 775			spi_finalize_current_transfer(spi->host);
 776			return;
 777		}
 778
 779		cond_resched();
 780	} while (!time_after_eq(jiffies, end));
 781
 782	if (spi->host->cur_msg)
 783		spi->host->cur_msg->status = -EIO;
 784	spi_finalize_current_transfer(spi->host);
 785}
 786
/*
 * Error/timeout callback from the SPI core: discard whatever is left
 * in the FIFOs so the next transfer starts clean.
 */
static void lantiq_ssc_handle_err(struct spi_controller *host,
				  struct spi_message *message)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	/* flush FIFOs on timeout */
	rx_fifo_flush(spi);
	tx_fifo_flush(spi);
}
 796
/*
 * Drive the internal chip-select output via the FPGO register: the low
 * byte clears (asserts low) and the high byte sets (deasserts high) the
 * corresponding GPO line.
 */
static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(spidev->controller);
	unsigned int cs = spi_get_chipselect(spidev, 0);
	u32 fgpo;

	if (!!(spidev->mode & SPI_CS_HIGH) == enable)
		fgpo = (1 << (cs - spi->base_cs));
	else
		fgpo = (1 << (cs - spi->base_cs + LTQ_SPI_FGPO_SETOUTN_S));

	lantiq_ssc_writel(spi, fgpo, LTQ_SPI_FPGO);
}
 810
/*
 * transfer_one callback: program speed/width/direction for this
 * transfer, then start it. Returns a positive length, which tells the
 * SPI core the transfer completes asynchronously.
 */
static int lantiq_ssc_transfer_one(struct spi_controller *host,
				   struct spi_device *spidev,
				   struct spi_transfer *t)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	hw_setup_transfer(spi, spidev, t);

	return transfer_start(spi, spidev, t);
}
 821
/*
 * Intel LGM IRQ setup: a single combined interrupt line handled by
 * intel_lgm_ssc_isr. Returns 0 on success or a negative errno.
 */
static int intel_lgm_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, intel_lgm_ssc_isr, 0, "spi", spi);
}
 832
 833static int lantiq_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
 834{
 835	int irq, err;
 836
 837	irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
 838	if (irq < 0)
 839		return irq;
 840
 841	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
 842			       0, LTQ_SPI_RX_IRQ_NAME, spi);
 843	if (err)
 844		return err;
 845
 846	irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
 847	if (irq < 0)
 848		return irq;
 849
 850	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
 851			       0, LTQ_SPI_TX_IRQ_NAME, spi);
 852
 853	if (err)
 854		return err;
 855
 856	irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
 857	if (irq < 0)
 858		return irq;
 859
 860	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_err_interrupt,
 861			       0, LTQ_SPI_ERR_IRQ_NAME, spi);
 862	return err;
 863}
 864
/* XWAY SoCs: three named IRQ lines, 6-bit FIFO level fields, no IRQ ack */
static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
	.cfg_irq	= lantiq_cfg_irq,
	.irnen_r	= LTQ_SPI_IRNEN_R_XWAY,
	.irnen_t	= LTQ_SPI_IRNEN_T_XWAY,
	.irnicr		= 0xF8,
	.irncr		= 0xFC,
	.fifo_size_mask	= GENMASK(5, 0),
	.irq_ack	= false,
};

/* XRX SoCs: same layout as XWAY but with swapped R/T interrupt bits */
static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
	.cfg_irq	= lantiq_cfg_irq,
	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
	.irnicr		= 0xF8,
	.irncr		= 0xFC,
	.fifo_size_mask	= GENMASK(5, 0),
	.irq_ack	= false,
};

/* Intel LGM: single IRQ line, swapped IRN register offsets, 8-bit FIFO
 * level fields, interrupts must be acked by writing IRNCR back */
static const struct lantiq_ssc_hwcfg intel_ssc_lgm = {
	.cfg_irq	= intel_lgm_cfg_irq,
	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
	.irnicr		= 0xFC,
	.irncr		= 0xF8,
	.fifo_size_mask	= GENMASK(7, 0),
	.irq_ack	= true,
};
 894
/* Device tree match table; .data selects the per-SoC hwcfg */
static const struct of_device_id lantiq_ssc_match[] = {
	{ .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
	{ .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
	{ .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
	{ .compatible = "intel,lgm-spi", .data = &intel_ssc_lgm, },
	{},
};
MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
 903
/*
 * Probe: map registers, request IRQs, acquire clocks, read the FIFO
 * geometry from the ID register, initialize the hardware and register
 * the SPI controller. Resources acquired without devm (fpi_clk, wq)
 * are released on the error paths in reverse order.
 */
static int lantiq_ssc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct lantiq_ssc_spi *spi;
	const struct lantiq_ssc_hwcfg *hwcfg;
	u32 id, supports_dma, revision;
	unsigned int num_cs;
	int err;

	hwcfg = of_device_get_match_data(dev);

	host = spi_alloc_host(dev, sizeof(struct lantiq_ssc_spi));
	if (!host)
		return -ENOMEM;

	spi = spi_controller_get_devdata(host);
	spi->host = host;
	spi->dev = dev;
	spi->hwcfg = hwcfg;
	platform_set_drvdata(pdev, spi);
	spi->regbase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi->regbase)) {
		err = PTR_ERR(spi->regbase);
		goto err_host_put;
	}

	err = hwcfg->cfg_irq(pdev, spi);
	if (err)
		goto err_host_put;

	spi->spi_clk = devm_clk_get_enabled(dev, "gate");
	if (IS_ERR(spi->spi_clk)) {
		err = PTR_ERR(spi->spi_clk);
		goto err_host_put;
	}

	/*
	 * Use the old clk_get_fpi() function on Lantiq platform, till it
	 * supports common clk.
	 */
#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
	spi->fpi_clk = clk_get_fpi();
#else
	spi->fpi_clk = clk_get(dev, "freq");
#endif
	if (IS_ERR(spi->fpi_clk)) {
		err = PTR_ERR(spi->fpi_clk);
		goto err_host_put;
	}

	/* Optional DT overrides; defaults used when the property is absent */
	num_cs = 8;
	of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);

	spi->base_cs = 1;
	of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);

	spin_lock_init(&spi->lock);
	spi->bits_per_word = 8;
	spi->speed_hz = 0;

	host->dev.of_node = pdev->dev.of_node;
	host->num_chipselect = num_cs;
	host->use_gpio_descriptors = true;
	host->setup = lantiq_ssc_setup;
	host->set_cs = lantiq_ssc_set_cs;
	host->handle_err = lantiq_ssc_handle_err;
	host->prepare_message = lantiq_ssc_prepare_message;
	host->unprepare_message = lantiq_ssc_unprepare_message;
	host->transfer_one = lantiq_ssc_transfer_one;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
			  SPI_LOOP;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
				   SPI_BPW_MASK(16) | SPI_BPW_MASK(32);

	/* Ordered workqueue: one busy-poll work item at a time */
	spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
	if (!spi->wq) {
		err = -ENOMEM;
		goto err_clk_put;
	}
	INIT_WORK(&spi->work, lantiq_ssc_bussy_work);

	/* FIFO sizes and capabilities are encoded in the ID register */
	id = lantiq_ssc_readl(spi, LTQ_SPI_ID);
	spi->tx_fifo_size = (id >> LTQ_SPI_ID_TXFS_S) & hwcfg->fifo_size_mask;
	spi->rx_fifo_size = (id >> LTQ_SPI_ID_RXFS_S) & hwcfg->fifo_size_mask;
	supports_dma = (id & LTQ_SPI_ID_CFG_M) >> LTQ_SPI_ID_CFG_S;
	revision = id & LTQ_SPI_ID_REV_M;

	lantiq_ssc_hw_init(spi);

	dev_info(dev,
		"Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
		revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);

	err = devm_spi_register_controller(dev, host);
	if (err) {
		dev_err(dev, "failed to register spi host\n");
		goto err_wq_destroy;
	}

	return 0;

err_wq_destroy:
	destroy_workqueue(spi->wq);
err_clk_put:
	clk_put(spi->fpi_clk);
err_host_put:
	spi_controller_put(host);

	return err;
}
1015
/*
 * Driver unbind: quiesce the controller, then release the resources
 * probe acquired without devm (workqueue and FPI clock). The gate clock
 * and register mapping are devm-managed.
 */
static void lantiq_ssc_remove(struct platform_device *pdev)
{
	struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);

	/* Mask all interrupts and stop the module clock divider */
	lantiq_ssc_writel(spi, 0, LTQ_SPI_IRNEN);
	lantiq_ssc_writel(spi, 0, LTQ_SPI_CLC);
	/* Drop any data still sitting in the FIFOs */
	rx_fifo_flush(spi);
	tx_fifo_flush(spi);
	/* Leave the controller disabled (config mode) */
	hw_enter_config_mode(spi);

	destroy_workqueue(spi->wq);
	clk_put(spi->fpi_clk);
}
1029
/* Platform driver glue; devices are matched via the OF table above */
static struct platform_driver lantiq_ssc_driver = {
	.probe = lantiq_ssc_probe,
	.remove_new = lantiq_ssc_remove,
	.driver = {
		.name = "spi-lantiq-ssc",
		.of_match_table = lantiq_ssc_match,
	},
};
1038module_platform_driver(lantiq_ssc_driver);
1039
1040MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
1041MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
1042MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
1043MODULE_LICENSE("GPL");
1044MODULE_ALIAS("platform:spi-lantiq-ssc");