drivers/spi/spi-lantiq-ssc.c (Linux v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
   4 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/of.h>
  10#include <linux/platform_device.h>
  11#include <linux/clk.h>
  12#include <linux/io.h>
  13#include <linux/delay.h>
  14#include <linux/interrupt.h>
  15#include <linux/sched.h>
  16#include <linux/completion.h>
  17#include <linux/spinlock.h>
  18#include <linux/err.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/spi/spi.h>
  21
  22#ifdef CONFIG_LANTIQ
  23#include <lantiq_soc.h>
  24#endif
  25
  26#define LTQ_SPI_RX_IRQ_NAME	"spi_rx"
  27#define LTQ_SPI_TX_IRQ_NAME	"spi_tx"
  28#define LTQ_SPI_ERR_IRQ_NAME	"spi_err"
  29#define LTQ_SPI_FRM_IRQ_NAME	"spi_frm"
  30
  31#define LTQ_SPI_CLC		0x00
  32#define LTQ_SPI_PISEL		0x04
  33#define LTQ_SPI_ID		0x08
  34#define LTQ_SPI_CON		0x10
  35#define LTQ_SPI_STAT		0x14
  36#define LTQ_SPI_WHBSTATE	0x18
  37#define LTQ_SPI_TB		0x20
  38#define LTQ_SPI_RB		0x24
  39#define LTQ_SPI_RXFCON		0x30
  40#define LTQ_SPI_TXFCON		0x34
  41#define LTQ_SPI_FSTAT		0x38
  42#define LTQ_SPI_BRT		0x40
  43#define LTQ_SPI_BRSTAT		0x44
  44#define LTQ_SPI_SFCON		0x60
  45#define LTQ_SPI_SFSTAT		0x64
  46#define LTQ_SPI_GPOCON		0x70
  47#define LTQ_SPI_GPOSTAT		0x74
  48#define LTQ_SPI_FPGO		0x78
  49#define LTQ_SPI_RXREQ		0x80
  50#define LTQ_SPI_RXCNT		0x84
  51#define LTQ_SPI_DMACON		0xec
  52#define LTQ_SPI_IRNEN		0xf4
  53
  54#define LTQ_SPI_CLC_SMC_S	16	/* Clock divider for sleep mode */
  55#define LTQ_SPI_CLC_SMC_M	(0xFF << LTQ_SPI_CLC_SMC_S)
  56#define LTQ_SPI_CLC_RMC_S	8	/* Clock divider for normal run mode */
  57#define LTQ_SPI_CLC_RMC_M	(0xFF << LTQ_SPI_CLC_RMC_S)
  58#define LTQ_SPI_CLC_DISS	BIT(1)	/* Disable status bit */
  59#define LTQ_SPI_CLC_DISR	BIT(0)	/* Disable request bit */
  60
  61#define LTQ_SPI_ID_TXFS_S	24	/* Implemented TX FIFO size */
  62#define LTQ_SPI_ID_RXFS_S	16	/* Implemented RX FIFO size */
  63#define LTQ_SPI_ID_MOD_S	8	/* Module ID */
  64#define LTQ_SPI_ID_MOD_M	(0xff << LTQ_SPI_ID_MOD_S)
  65#define LTQ_SPI_ID_CFG_S	5	/* DMA interface support */
  66#define LTQ_SPI_ID_CFG_M	(1 << LTQ_SPI_ID_CFG_S)
  67#define LTQ_SPI_ID_REV_M	0x1F	/* Hardware revision number */
  68
  69#define LTQ_SPI_CON_BM_S	16	/* Data width selection */
  70#define LTQ_SPI_CON_BM_M	(0x1F << LTQ_SPI_CON_BM_S)
  71#define LTQ_SPI_CON_EM		BIT(24)	/* Echo mode */
  72#define LTQ_SPI_CON_IDLE	BIT(23)	/* Idle bit value */
  73#define LTQ_SPI_CON_ENBV	BIT(22)	/* Enable byte valid control */
  74#define LTQ_SPI_CON_RUEN	BIT(12)	/* Receive underflow error enable */
  75#define LTQ_SPI_CON_TUEN	BIT(11)	/* Transmit underflow error enable */
  76#define LTQ_SPI_CON_AEN		BIT(10)	/* Abort error enable */
  77#define LTQ_SPI_CON_REN		BIT(9)	/* Receive overflow error enable */
  78#define LTQ_SPI_CON_TEN		BIT(8)	/* Transmit overflow error enable */
  79#define LTQ_SPI_CON_LB		BIT(7)	/* Loopback control */
  80#define LTQ_SPI_CON_PO		BIT(6)	/* Clock polarity control */
  81#define LTQ_SPI_CON_PH		BIT(5)	/* Clock phase control */
  82#define LTQ_SPI_CON_HB		BIT(4)	/* Heading control */
  83#define LTQ_SPI_CON_RXOFF	BIT(1)	/* Switch receiver off */
  84#define LTQ_SPI_CON_TXOFF	BIT(0)	/* Switch transmitter off */
  85
  86#define LTQ_SPI_STAT_RXBV_S	28
  87#define LTQ_SPI_STAT_RXBV_M	(0x7 << LTQ_SPI_STAT_RXBV_S)
  88#define LTQ_SPI_STAT_BSY	BIT(13)	/* Busy flag */
  89#define LTQ_SPI_STAT_RUE	BIT(12)	/* Receive underflow error flag */
  90#define LTQ_SPI_STAT_TUE	BIT(11)	/* Transmit underflow error flag */
  91#define LTQ_SPI_STAT_AE		BIT(10)	/* Abort error flag */
  92#define LTQ_SPI_STAT_RE		BIT(9)	/* Receive error flag */
  93#define LTQ_SPI_STAT_TE		BIT(8)	/* Transmit error flag */
  94#define LTQ_SPI_STAT_ME		BIT(7)	/* Mode error flag */
  95#define LTQ_SPI_STAT_MS		BIT(1)	/* Host/target select bit */
  96#define LTQ_SPI_STAT_EN		BIT(0)	/* Enable bit */
  97#define LTQ_SPI_STAT_ERRORS	(LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
  98				 LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
  99				 LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)
 100
 101#define LTQ_SPI_WHBSTATE_SETTUE	BIT(15)	/* Set transmit underflow error flag */
 102#define LTQ_SPI_WHBSTATE_SETAE	BIT(14)	/* Set abort error flag */
 103#define LTQ_SPI_WHBSTATE_SETRE	BIT(13)	/* Set receive error flag */
 104#define LTQ_SPI_WHBSTATE_SETTE	BIT(12)	/* Set transmit error flag */
 105#define LTQ_SPI_WHBSTATE_CLRTUE	BIT(11)	/* Clear transmit underflow error flag */
 106#define LTQ_SPI_WHBSTATE_CLRAE	BIT(10)	/* Clear abort error flag */
 107#define LTQ_SPI_WHBSTATE_CLRRE	BIT(9)	/* Clear receive error flag */
 108#define LTQ_SPI_WHBSTATE_CLRTE	BIT(8)	/* Clear transmit error flag */
 109#define LTQ_SPI_WHBSTATE_SETME	BIT(7)	/* Set mode error flag */
 110#define LTQ_SPI_WHBSTATE_CLRME	BIT(6)	/* Clear mode error flag */
 111#define LTQ_SPI_WHBSTATE_SETRUE	BIT(5)	/* Set receive underflow error flag */
 112#define LTQ_SPI_WHBSTATE_CLRRUE	BIT(4)	/* Clear receive underflow error flag */
 113#define LTQ_SPI_WHBSTATE_SETMS	BIT(3)	/* Set host select bit */
 114#define LTQ_SPI_WHBSTATE_CLRMS	BIT(2)	/* Clear host select bit */
 115#define LTQ_SPI_WHBSTATE_SETEN	BIT(1)	/* Set enable bit (operational mode) */
  116#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
 117#define LTQ_SPI_WHBSTATE_CLR_ERRORS	(LTQ_SPI_WHBSTATE_CLRRUE | \
 118					 LTQ_SPI_WHBSTATE_CLRME | \
 119					 LTQ_SPI_WHBSTATE_CLRTE | \
 120					 LTQ_SPI_WHBSTATE_CLRRE | \
 121					 LTQ_SPI_WHBSTATE_CLRAE | \
 122					 LTQ_SPI_WHBSTATE_CLRTUE)
 123
 124#define LTQ_SPI_RXFCON_RXFITL_S	8	/* FIFO interrupt trigger level */
 125#define LTQ_SPI_RXFCON_RXFLU	BIT(1)	/* FIFO flush */
 126#define LTQ_SPI_RXFCON_RXFEN	BIT(0)	/* FIFO enable */
 127
 128#define LTQ_SPI_TXFCON_TXFITL_S	8	/* FIFO interrupt trigger level */
 129#define LTQ_SPI_TXFCON_TXFLU	BIT(1)	/* FIFO flush */
 130#define LTQ_SPI_TXFCON_TXFEN	BIT(0)	/* FIFO enable */
 131
 132#define LTQ_SPI_FSTAT_RXFFL_S	0
 133#define LTQ_SPI_FSTAT_TXFFL_S	8
 134
 135#define LTQ_SPI_GPOCON_ISCSBN_S	8
 136#define LTQ_SPI_GPOCON_INVOUTN_S	0
 137
 138#define LTQ_SPI_FGPO_SETOUTN_S	8
 139#define LTQ_SPI_FGPO_CLROUTN_S	0
 140
 141#define LTQ_SPI_RXREQ_RXCNT_M	0xFFFF	/* Receive count value */
 142#define LTQ_SPI_RXCNT_TODO_M	0xFFFF	/* Receive to-do value */
 143
 144#define LTQ_SPI_IRNEN_TFI	BIT(4)	/* TX finished interrupt */
 145#define LTQ_SPI_IRNEN_F		BIT(3)	/* Frame end interrupt request */
 146#define LTQ_SPI_IRNEN_E		BIT(2)	/* Error end interrupt request */
 147#define LTQ_SPI_IRNEN_T_XWAY	BIT(1)	/* Transmit end interrupt request */
 148#define LTQ_SPI_IRNEN_R_XWAY	BIT(0)	/* Receive end interrupt request */
 149#define LTQ_SPI_IRNEN_R_XRX	BIT(1)	/* Transmit end interrupt request */
 150#define LTQ_SPI_IRNEN_T_XRX	BIT(0)	/* Receive end interrupt request */
 151#define LTQ_SPI_IRNEN_ALL	0x1F
 152
 153struct lantiq_ssc_spi;
 154
 155struct lantiq_ssc_hwcfg {
 156	int (*cfg_irq)(struct platform_device *pdev, struct lantiq_ssc_spi *spi);
 157	unsigned int	irnen_r;
 158	unsigned int	irnen_t;
 159	unsigned int	irncr;
 160	unsigned int	irnicr;
 161	bool		irq_ack;
 162	u32		fifo_size_mask;
 163};
 164
 165struct lantiq_ssc_spi {
 166	struct spi_controller		*host;
 167	struct device			*dev;
 168	void __iomem			*regbase;
 169	struct clk			*spi_clk;
 170	struct clk			*fpi_clk;
 171	const struct lantiq_ssc_hwcfg	*hwcfg;
 172
 173	spinlock_t			lock;
 174	struct workqueue_struct		*wq;
 175	struct work_struct		work;
 176
 177	const u8			*tx;
 178	u8				*rx;
 179	unsigned int			tx_todo;
 180	unsigned int			rx_todo;
 181	unsigned int			bits_per_word;
 182	unsigned int			speed_hz;
 183	unsigned int			tx_fifo_size;
 184	unsigned int			rx_fifo_size;
 185	unsigned int			base_cs;
 186	unsigned int			fdx_tx_level;
 187};
 188
 189static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
 190{
 191	return __raw_readl(spi->regbase + reg);
 192}
 193
 194static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
 195			      u32 reg)
 196{
 197	__raw_writel(val, spi->regbase + reg);
 198}
 199
 200static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
 201			     u32 set, u32 reg)
 202{
 203	u32 val = __raw_readl(spi->regbase + reg);
 204
 205	val &= ~clr;
 206	val |= set;
 207	__raw_writel(val, spi->regbase + reg);
 208}
 209
 210static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
 211{
 212	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 213	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
 214
 215	return (fstat >> LTQ_SPI_FSTAT_TXFFL_S) & hwcfg->fifo_size_mask;
 216}
 217
 218static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
 219{
 220	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 221	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
 222
 223	return (fstat >> LTQ_SPI_FSTAT_RXFFL_S) & hwcfg->fifo_size_mask;
 224}
 225
 226static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
 227{
 228	return spi->tx_fifo_size - tx_fifo_level(spi);
 229}
 230
 231static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
 232{
 233	u32 val = spi->rx_fifo_size << LTQ_SPI_RXFCON_RXFITL_S;
 234
 235	val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
 236	lantiq_ssc_writel(spi, val, LTQ_SPI_RXFCON);
 237}
 238
 239static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
 240{
 241	u32 val = 1 << LTQ_SPI_TXFCON_TXFITL_S;
 242
 243	val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
 244	lantiq_ssc_writel(spi, val, LTQ_SPI_TXFCON);
 245}
 246
 247static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
 248{
 249	lantiq_ssc_maskl(spi, 0, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
 250}
 251
 252static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
 253{
 254	lantiq_ssc_maskl(spi, 0, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
 255}
 256
 257static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
 258{
 259	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
 260}
 261
 262static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
 263{
 264	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
 265}
 266
 267static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
 268			      unsigned int max_speed_hz)
 269{
 270	u32 spi_clk, brt;
 271
 272	/*
 273	 * SPI module clock is derived from FPI bus clock dependent on
 274	 * divider value in CLC.RMS which is always set to 1.
 275	 *
 276	 *                 f_SPI
 277	 * baudrate = --------------
 278	 *             2 * (BR + 1)
 279	 */
 280	spi_clk = clk_get_rate(spi->fpi_clk) / 2;
 281
 282	if (max_speed_hz > spi_clk)
 283		brt = 0;
 284	else
 285		brt = spi_clk / max_speed_hz - 1;
 286
 287	if (brt > 0xFFFF)
 288		brt = 0xFFFF;
 289
 290	dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
 291		spi_clk, max_speed_hz, brt);
 292
 293	lantiq_ssc_writel(spi, brt, LTQ_SPI_BRT);
 294}
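/*
 * Editor's note -- worked example for the formula above (the 100 MHz
 * FPI rate is only an assumed figure, not something the driver fixes):
 * spi_clk = 100 MHz / 2 = 50 MHz. Asking for max_speed_hz = 2 MHz gives
 * BR = 50 MHz / 2 MHz - 1 = 24, i.e. a bit clock of 50 MHz / (24 + 1)
 * = 2 MHz. Requests above 50 MHz clamp BR to 0 (the bit clock tops out
 * at spi_clk); very slow requests clamp BR to 0xFFFF.
 */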
 295
 296static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
 297				   unsigned int bits_per_word)
 298{
 299	u32 bm;
 300
 301	/* CON.BM value = bits_per_word - 1 */
 302	bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_S;
 303
 304	lantiq_ssc_maskl(spi, LTQ_SPI_CON_BM_M, bm, LTQ_SPI_CON);
 305}
 306
 307static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
 308				unsigned int mode)
 309{
 310	u32 con_set = 0, con_clr = 0;
 311
 312	/*
 313	 * SPI mode mapping in CON register:
 314	 * Mode CPOL CPHA CON.PO CON.PH
 315	 *  0    0    0      0      1
 316	 *  1    0    1      0      0
 317	 *  2    1    0      1      1
 318	 *  3    1    1      1      0
 319	 */
 320	if (mode & SPI_CPHA)
 321		con_clr |= LTQ_SPI_CON_PH;
 322	else
 323		con_set |= LTQ_SPI_CON_PH;
 324
 325	if (mode & SPI_CPOL)
 326		con_set |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
 327	else
 328		con_clr |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
 329
 330	/* Set heading control */
 331	if (mode & SPI_LSB_FIRST)
 332		con_clr |= LTQ_SPI_CON_HB;
 333	else
 334		con_set |= LTQ_SPI_CON_HB;
 335
 336	/* Set loopback mode */
 337	if (mode & SPI_LOOP)
 338		con_set |= LTQ_SPI_CON_LB;
 339	else
 340		con_clr |= LTQ_SPI_CON_LB;
 341
 342	lantiq_ssc_maskl(spi, con_clr, con_set, LTQ_SPI_CON);
 343}
 344
 345static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
 346{
 347	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 348
 349	/*
 350	 * Set clock divider for run mode to 1 to
 351	 * run at same frequency as FPI bus
 352	 */
 353	lantiq_ssc_writel(spi, 1 << LTQ_SPI_CLC_RMC_S, LTQ_SPI_CLC);
 354
 355	/* Put controller into config mode */
 356	hw_enter_config_mode(spi);
 357
 358	/* Clear error flags */
 359	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
 360
 361	/* Enable error checking, disable TX/RX */
 362	lantiq_ssc_writel(spi, LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
 363		LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN | LTQ_SPI_CON_TXOFF |
 364		LTQ_SPI_CON_RXOFF, LTQ_SPI_CON);
 365
 366	/* Setup default SPI mode */
 367	hw_setup_bits_per_word(spi, spi->bits_per_word);
 368	hw_setup_clock_mode(spi, SPI_MODE_0);
 369
 370	/* Enable host mode and clear error flags */
 371	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETMS |
 372			       LTQ_SPI_WHBSTATE_CLR_ERRORS,
 373			       LTQ_SPI_WHBSTATE);
 374
 375	/* Reset GPIO/CS registers */
 376	lantiq_ssc_writel(spi, 0, LTQ_SPI_GPOCON);
 377	lantiq_ssc_writel(spi, 0xFF00, LTQ_SPI_FPGO);
 378
 379	/* Enable and flush FIFOs */
 380	rx_fifo_reset(spi);
 381	tx_fifo_reset(spi);
 382
 383	/* Enable interrupts */
 384	lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r |
 385			  LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
 386}
 387
 388static int lantiq_ssc_setup(struct spi_device *spidev)
 389{
 390	struct spi_controller *host = spidev->controller;
 391	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
 392	unsigned int cs = spi_get_chipselect(spidev, 0);
 393	u32 gpocon;
 394
 395	/* GPIOs are used for CS */
 396	if (spi_get_csgpiod(spidev, 0))
 397		return 0;
 398
 399	dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
 400
 401	if (cs < spi->base_cs) {
 402		dev_err(spi->dev,
 403			"chipselect %i too small (min %i)\n", cs, spi->base_cs);
 404		return -EINVAL;
 405	}
 406
 407	/* set GPO pin to CS mode */
 408	gpocon = 1 << ((cs - spi->base_cs) + LTQ_SPI_GPOCON_ISCSBN_S);
 409
 410	/* invert GPO pin */
 411	if (spidev->mode & SPI_CS_HIGH)
 412		gpocon |= 1 << (cs - spi->base_cs);
 413
 414	lantiq_ssc_maskl(spi, 0, gpocon, LTQ_SPI_GPOCON);
 415
 416	return 0;
 417}
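/*
 * Editor's note -- illustrative GPOCON values (the cs/base_cs numbers
 * are assumed, not from the upstream sources): with base_cs = 1 and
 * chipselect 2, the CS-mode bit is 1 << ((2 - 1) + 8) = 0x200 (ISCSB1);
 * if the device also sets SPI_CS_HIGH, the invert bit 1 << (2 - 1) = 0x2
 * is OR'd in, so GPOCON receives 0x202.
 */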
 418
 419static int lantiq_ssc_prepare_message(struct spi_controller *host,
 420				      struct spi_message *message)
 421{
 422	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
 423
 424	hw_enter_config_mode(spi);
 425	hw_setup_clock_mode(spi, message->spi->mode);
 426	hw_enter_active_mode(spi);
 427
 428	return 0;
 429}
 430
 431static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
 432			      struct spi_device *spidev, struct spi_transfer *t)
 433{
 434	unsigned int speed_hz = t->speed_hz;
 435	unsigned int bits_per_word = t->bits_per_word;
 436	u32 con;
 437
 438	if (bits_per_word != spi->bits_per_word ||
 439		speed_hz != spi->speed_hz) {
 440		hw_enter_config_mode(spi);
 441		hw_setup_speed_hz(spi, speed_hz);
 442		hw_setup_bits_per_word(spi, bits_per_word);
 443		hw_enter_active_mode(spi);
 444
 445		spi->speed_hz = speed_hz;
 446		spi->bits_per_word = bits_per_word;
 447	}
 448
 449	/* Configure transmitter and receiver */
 450	con = lantiq_ssc_readl(spi, LTQ_SPI_CON);
 451	if (t->tx_buf)
 452		con &= ~LTQ_SPI_CON_TXOFF;
 453	else
 454		con |= LTQ_SPI_CON_TXOFF;
 455
 456	if (t->rx_buf)
 457		con &= ~LTQ_SPI_CON_RXOFF;
 458	else
 459		con |= LTQ_SPI_CON_RXOFF;
 460
 461	lantiq_ssc_writel(spi, con, LTQ_SPI_CON);
 462}
 463
 464static int lantiq_ssc_unprepare_message(struct spi_controller *host,
 465					struct spi_message *message)
 466{
 467	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
 468
 469	flush_workqueue(spi->wq);
 470
 471	/* Disable transmitter and receiver while idle */
 472	lantiq_ssc_maskl(spi, 0, LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF,
 473			 LTQ_SPI_CON);
 474
 475	return 0;
 476}
 477
 478static void tx_fifo_write(struct lantiq_ssc_spi *spi)
 479{
 480	const u8 *tx8;
 481	const u16 *tx16;
 482	const u32 *tx32;
 483	u32 data;
 484	unsigned int tx_free = tx_fifo_free(spi);
 485
 486	spi->fdx_tx_level = 0;
 487	while (spi->tx_todo && tx_free) {
 488		switch (spi->bits_per_word) {
 489		case 2 ... 8:
 490			tx8 = spi->tx;
 491			data = *tx8;
 492			spi->tx_todo--;
 493			spi->tx++;
 494			break;
 495		case 16:
 496			tx16 = (u16 *) spi->tx;
 497			data = *tx16;
 498			spi->tx_todo -= 2;
 499			spi->tx += 2;
 500			break;
 501		case 32:
 502			tx32 = (u32 *) spi->tx;
 503			data = *tx32;
 504			spi->tx_todo -= 4;
 505			spi->tx += 4;
 506			break;
 507		default:
 508			WARN_ON(1);
 509			data = 0;
 510			break;
 511		}
 512
 513		lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
 514		tx_free--;
 515		spi->fdx_tx_level++;
 516	}
 517}
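/*
 * Editor's note: fdx_tx_level counts the words pushed into the TX FIFO
 * during this burst. rx_fifo_read_full_duplex() below busy-waits until
 * the RX FIFO holds exactly that many words, so every word clocked out
 * has been matched by one clocked in before the FIFO is drained.
 */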
 518
 519static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
 520{
 521	u8 *rx8;
 522	u16 *rx16;
 523	u32 *rx32;
 524	u32 data;
 525	unsigned int rx_fill = rx_fifo_level(spi);
 526
 527	/*
 528	 * Wait until all expected data to be shifted in.
 529	 * Otherwise, rx overrun may occur.
 530	 */
 531	while (rx_fill != spi->fdx_tx_level)
 532		rx_fill = rx_fifo_level(spi);
 533
 534	while (rx_fill) {
 535		data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
 536
 537		switch (spi->bits_per_word) {
 538		case 2 ... 8:
 539			rx8 = spi->rx;
 540			*rx8 = data;
 541			spi->rx_todo--;
 542			spi->rx++;
 543			break;
 544		case 16:
 545			rx16 = (u16 *) spi->rx;
 546			*rx16 = data;
 547			spi->rx_todo -= 2;
 548			spi->rx += 2;
 549			break;
 550		case 32:
 551			rx32 = (u32 *) spi->rx;
 552			*rx32 = data;
 553			spi->rx_todo -= 4;
 554			spi->rx += 4;
 555			break;
 556		default:
 557			WARN_ON(1);
 558			break;
 559		}
 560
 561		rx_fill--;
 562	}
 563}
 564
 565static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
 566{
 567	u32 data, *rx32;
 568	u8 *rx8;
 569	unsigned int rxbv, shift;
 570	unsigned int rx_fill = rx_fifo_level(spi);
 571
 572	/*
 573	 * In RX-only mode the bits per word value is ignored by HW. A value
 574	 * of 32 is used instead. Thus all 4 bytes per FIFO must be read.
 575	 * If remaining RX bytes are less than 4, the FIFO must be read
 576	 * differently. The amount of received and valid bytes is indicated
 577	 * by STAT.RXBV register value.
 578	 */
 579	while (rx_fill) {
 580		if (spi->rx_todo < 4)  {
 581			rxbv = (lantiq_ssc_readl(spi, LTQ_SPI_STAT) &
 582				LTQ_SPI_STAT_RXBV_M) >> LTQ_SPI_STAT_RXBV_S;
 583			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
 584
 585			shift = (rxbv - 1) * 8;
 586			rx8 = spi->rx;
 587
 588			while (rxbv) {
 589				*rx8++ = (data >> shift) & 0xFF;
 590				rxbv--;
 591				shift -= 8;
 592				spi->rx_todo--;
 593				spi->rx++;
 594			}
 595		} else {
 596			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
 597			rx32 = (u32 *) spi->rx;
 598
 599			*rx32++ = data;
 600			spi->rx_todo -= 4;
 601			spi->rx += 4;
 602		}
 603		rx_fill--;
 604	}
 605}
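/*
 * Editor's note -- example of the tail read above (illustrative values):
 * with rx_todo = 3 the hardware reports STAT.RXBV = 3 and RB might hold
 * 0x00AABBCC; the loop then stores 0xAA, 0xBB, 0xCC in that order,
 * taking the valid bytes from the most significant end downwards.
 */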
 606
 607static void rx_request(struct lantiq_ssc_spi *spi)
 608{
 609	unsigned int rxreq, rxreq_max;
 610
 611	/*
 612	 * To avoid receive overflows at high clocks it is better to request
 613	 * only the amount of bytes that fits into all FIFOs. This value
 614	 * depends on the FIFO size implemented in hardware.
 615	 */
 616	rxreq = spi->rx_todo;
 617	rxreq_max = spi->rx_fifo_size * 4;
 618	if (rxreq > rxreq_max)
 619		rxreq = rxreq_max;
 620
 621	lantiq_ssc_writel(spi, rxreq, LTQ_SPI_RXREQ);
 622}
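/*
 * Editor's note -- example (assuming an 8-word RX FIFO): rxreq_max is
 * 8 * 4 = 32 bytes, so a 100-byte RX-only transfer is requested as
 * 32 + 32 + 32 + 4 bytes, one RXREQ write per interrupt-driven refill.
 */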
 623
 624static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
 625{
 626	struct lantiq_ssc_spi *spi = data;
 627	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 628	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
 629
 630	spin_lock(&spi->lock);
 631	if (hwcfg->irq_ack)
 632		lantiq_ssc_writel(spi, val, hwcfg->irncr);
 633
 634	if (spi->tx) {
 635		if (spi->rx && spi->rx_todo)
 636			rx_fifo_read_full_duplex(spi);
 637
 638		if (spi->tx_todo)
 639			tx_fifo_write(spi);
 640		else if (!tx_fifo_level(spi))
 641			goto completed;
 642	} else if (spi->rx) {
 643		if (spi->rx_todo) {
 644			rx_fifo_read_half_duplex(spi);
 645
 646			if (spi->rx_todo)
 647				rx_request(spi);
 648			else
 649				goto completed;
 650		} else {
 651			goto completed;
 652		}
 653	}
 654
 655	spin_unlock(&spi->lock);
 656	return IRQ_HANDLED;
 657
 658completed:
 659	queue_work(spi->wq, &spi->work);
 660	spin_unlock(&spi->lock);
 661
 662	return IRQ_HANDLED;
 663}
 664
 665static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
 666{
 667	struct lantiq_ssc_spi *spi = data;
 668	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 669	u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
 670	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
 671
 672	if (!(stat & LTQ_SPI_STAT_ERRORS))
 673		return IRQ_NONE;
 674
 675	spin_lock(&spi->lock);
 676	if (hwcfg->irq_ack)
 677		lantiq_ssc_writel(spi, val, hwcfg->irncr);
 678
 679	if (stat & LTQ_SPI_STAT_RUE)
 680		dev_err(spi->dev, "receive underflow error\n");
 681	if (stat & LTQ_SPI_STAT_TUE)
 682		dev_err(spi->dev, "transmit underflow error\n");
 683	if (stat & LTQ_SPI_STAT_AE)
 684		dev_err(spi->dev, "abort error\n");
 685	if (stat & LTQ_SPI_STAT_RE)
 686		dev_err(spi->dev, "receive overflow error\n");
 687	if (stat & LTQ_SPI_STAT_TE)
 688		dev_err(spi->dev, "transmit overflow error\n");
 689	if (stat & LTQ_SPI_STAT_ME)
 690		dev_err(spi->dev, "mode error\n");
 691
 692	/* Clear error flags */
 693	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
 694
 695	/* set bad status so it can be retried */
 696	if (spi->host->cur_msg)
 697		spi->host->cur_msg->status = -EIO;
 698	queue_work(spi->wq, &spi->work);
 699	spin_unlock(&spi->lock);
 700
 701	return IRQ_HANDLED;
 702}
 703
 704static irqreturn_t intel_lgm_ssc_isr(int irq, void *data)
 705{
 706	struct lantiq_ssc_spi *spi = data;
 707	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
 708	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
 709
 710	if (!(val & LTQ_SPI_IRNEN_ALL))
 711		return IRQ_NONE;
 712
 713	if (val & LTQ_SPI_IRNEN_E)
 714		return lantiq_ssc_err_interrupt(irq, data);
 715
 716	if ((val & hwcfg->irnen_t) || (val & hwcfg->irnen_r))
 717		return lantiq_ssc_xmit_interrupt(irq, data);
 718
 719	return IRQ_HANDLED;
 720}
 721
 722static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
 723			  struct spi_transfer *t)
 724{
 725	unsigned long flags;
 726
 727	spin_lock_irqsave(&spi->lock, flags);
 728
 729	spi->tx = t->tx_buf;
 730	spi->rx = t->rx_buf;
 731
 732	if (t->tx_buf) {
 733		spi->tx_todo = t->len;
 734
 735		/* initially fill TX FIFO */
 736		tx_fifo_write(spi);
 737	}
 738
 739	if (spi->rx) {
 740		spi->rx_todo = t->len;
 741
 742		/* start shift clock in RX-only mode */
 743		if (!spi->tx)
 744			rx_request(spi);
 745	}
 746
 747	spin_unlock_irqrestore(&spi->lock, flags);
 748
 749	return t->len;
 750}
 751
 752/*
 753 * The driver only gets an interrupt when the FIFO is empty, but there
 754 * is an additional shift register from which the data is written to
 755 * the wire. We get the last interrupt when the controller starts to
 756 * write the last word to the wire, not when it is finished. Do busy
 757 * waiting till it finishes.
 758 */
 759static void lantiq_ssc_bussy_work(struct work_struct *work)
 760{
 761	struct lantiq_ssc_spi *spi;
 762	unsigned long long timeout = 8LL * 1000LL;
 763	unsigned long end;
 764
 765	spi = container_of(work, typeof(*spi), work);
 766
 767	do_div(timeout, spi->speed_hz);
 768	timeout += timeout + 100; /* some tolerance */
 769
 770	end = jiffies + msecs_to_jiffies(timeout);
 771	do {
 772		u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
 773
 774		if (!(stat & LTQ_SPI_STAT_BSY)) {
 775			spi_finalize_current_transfer(spi->host);
 776			return;
 777		}
 778
 779		cond_resched();
 780	} while (!time_after_eq(jiffies, end));
 781
 782	if (spi->host->cur_msg)
 783		spi->host->cur_msg->status = -EIO;
 784	spi_finalize_current_transfer(spi->host);
 785}
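/*
 * Editor's note: the timeout above works out to roughly
 * 2 * (8000 / speed_hz) + 100 milliseconds -- twice the time needed to
 * shift one 8-bit word at the current bit clock, plus 100 ms slack --
 * e.g. about 116 ms at 1 kHz and the 100 ms floor at typical MHz rates.
 */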
 786
 787static void lantiq_ssc_handle_err(struct spi_controller *host,
 788				  struct spi_message *message)
 789{
 790	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
 791
 792	/* flush FIFOs on timeout */
 793	rx_fifo_flush(spi);
 794	tx_fifo_flush(spi);
 795}
 796
 797static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
 798{
 799	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(spidev->controller);
 800	unsigned int cs = spi_get_chipselect(spidev, 0);
 801	u32 fgpo;
 802
 803	if (!!(spidev->mode & SPI_CS_HIGH) == enable)
 804		fgpo = (1 << (cs - spi->base_cs));
 805	else
 806		fgpo = (1 << (cs - spi->base_cs + LTQ_SPI_FGPO_SETOUTN_S));
 807
 808	lantiq_ssc_writel(spi, fgpo, LTQ_SPI_FPGO);
 809}
 810
 811static int lantiq_ssc_transfer_one(struct spi_controller *host,
 812				   struct spi_device *spidev,
 813				   struct spi_transfer *t)
 814{
 815	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
 816
 817	hw_setup_transfer(spi, spidev, t);
 818
 819	return transfer_start(spi, spidev, t);
 820}
 821
 822static int intel_lgm_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
 823{
 824	int irq;
 825
 826	irq = platform_get_irq(pdev, 0);
 827	if (irq < 0)
 828		return irq;
 829
 830	return devm_request_irq(&pdev->dev, irq, intel_lgm_ssc_isr, 0, "spi", spi);
 831}
 832
 833static int lantiq_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
 834{
 835	int irq, err;
 836
 837	irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
 838	if (irq < 0)
 839		return irq;
 840
 841	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
 842			       0, LTQ_SPI_RX_IRQ_NAME, spi);
 843	if (err)
 844		return err;
 845
 846	irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
 847	if (irq < 0)
 848		return irq;
 849
 850	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
 851			       0, LTQ_SPI_TX_IRQ_NAME, spi);
 852
 853	if (err)
 854		return err;
 855
 856	irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
 857	if (irq < 0)
 858		return irq;
 859
 860	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_err_interrupt,
 861			       0, LTQ_SPI_ERR_IRQ_NAME, spi);
 862	return err;
 863}
 864
 865static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
 866	.cfg_irq	= lantiq_cfg_irq,
 867	.irnen_r	= LTQ_SPI_IRNEN_R_XWAY,
 868	.irnen_t	= LTQ_SPI_IRNEN_T_XWAY,
 869	.irnicr		= 0xF8,
 870	.irncr		= 0xFC,
 871	.fifo_size_mask	= GENMASK(5, 0),
 872	.irq_ack	= false,
 873};
 874
 875static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
 876	.cfg_irq	= lantiq_cfg_irq,
 877	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
 878	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
 879	.irnicr		= 0xF8,
 880	.irncr		= 0xFC,
 881	.fifo_size_mask	= GENMASK(5, 0),
 882	.irq_ack	= false,
 883};
 884
 885static const struct lantiq_ssc_hwcfg intel_ssc_lgm = {
 886	.cfg_irq	= intel_lgm_cfg_irq,
 887	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
 888	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
 889	.irnicr		= 0xFC,
 890	.irncr		= 0xF8,
 891	.fifo_size_mask	= GENMASK(7, 0),
 892	.irq_ack	= true,
 893};
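/*
 * Editor's note: the three hwcfg variants above differ in which IRNEN
 * bit signals RX vs. TX completion, in the IRNCR/IRNICR offsets
 * (0xFC/0xF8 on XWAY/xRX, swapped on LGM), in the width of the FIFO
 * fill-level fields (6 bits on XWAY/xRX, 8 bits on LGM), in whether the
 * interrupt must be acknowledged by writing IRNCR back (LGM only), and
 * in how the interrupt lines are requested (three named IRQs on
 * XWAY/xRX, a single combined IRQ on LGM).
 */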
 894
 895static const struct of_device_id lantiq_ssc_match[] = {
 896	{ .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
 897	{ .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
 898	{ .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
 899	{ .compatible = "intel,lgm-spi", .data = &intel_ssc_lgm, },
 900	{},
 901};
 902MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
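/*
 * Editor's note -- sketch of a device tree node this table would match,
 * assuming an xRX-family SoC. The unit address, reg size, interrupt
 * numbers and num-cs value are placeholders; only the compatible string,
 * the interrupt names and the "num-cs"/"base-cs" property names come
 * from the code above:
 *
 *	spi@0 {
 *		compatible = "lantiq,xrx100-spi";
 *		reg = <0x0 0x100>;			// placeholder
 *		interrupts = <22 23 24>;		// placeholders
 *		interrupt-names = "spi_rx", "spi_tx", "spi_err";
 *		num-cs = <6>;
 *		base-cs = <1>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */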
 903
 904static int lantiq_ssc_probe(struct platform_device *pdev)
 905{
 906	struct device *dev = &pdev->dev;
 907	struct spi_controller *host;
 908	struct lantiq_ssc_spi *spi;
 909	const struct lantiq_ssc_hwcfg *hwcfg;
 910	u32 id, supports_dma, revision;
 911	unsigned int num_cs;
 912	int err;
 913
 914	hwcfg = of_device_get_match_data(dev);
 915
 916	host = spi_alloc_host(dev, sizeof(struct lantiq_ssc_spi));
 917	if (!host)
 918		return -ENOMEM;
 919
 920	spi = spi_controller_get_devdata(host);
 921	spi->host = host;
 922	spi->dev = dev;
 923	spi->hwcfg = hwcfg;
 924	platform_set_drvdata(pdev, spi);
 925	spi->regbase = devm_platform_ioremap_resource(pdev, 0);
 926	if (IS_ERR(spi->regbase)) {
 927		err = PTR_ERR(spi->regbase);
 928		goto err_host_put;
 929	}
 930
 931	err = hwcfg->cfg_irq(pdev, spi);
 932	if (err)
 933		goto err_host_put;
 934
 935	spi->spi_clk = devm_clk_get_enabled(dev, "gate");
 936	if (IS_ERR(spi->spi_clk)) {
 937		err = PTR_ERR(spi->spi_clk);
 938		goto err_host_put;
 939	}
 940
 941	/*
 942	 * Use the old clk_get_fpi() function on Lantiq platform, till it
 943	 * supports common clk.
 944	 */
 945#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
 946	spi->fpi_clk = clk_get_fpi();
 947#else
 948	spi->fpi_clk = clk_get(dev, "freq");
 949#endif
 950	if (IS_ERR(spi->fpi_clk)) {
 951		err = PTR_ERR(spi->fpi_clk);
 952		goto err_host_put;
 953	}
 954
 955	num_cs = 8;
 956	of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
 957
 958	spi->base_cs = 1;
 959	of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);
 960
 961	spin_lock_init(&spi->lock);
 962	spi->bits_per_word = 8;
 963	spi->speed_hz = 0;
 964
 965	host->dev.of_node = pdev->dev.of_node;
 966	host->num_chipselect = num_cs;
 967	host->use_gpio_descriptors = true;
 968	host->setup = lantiq_ssc_setup;
 969	host->set_cs = lantiq_ssc_set_cs;
 970	host->handle_err = lantiq_ssc_handle_err;
 971	host->prepare_message = lantiq_ssc_prepare_message;
 972	host->unprepare_message = lantiq_ssc_unprepare_message;
 973	host->transfer_one = lantiq_ssc_transfer_one;
 974	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
 975			  SPI_LOOP;
 976	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
 977				   SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
 978
 979	spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
 980	if (!spi->wq) {
 981		err = -ENOMEM;
 982		goto err_clk_put;
 983	}
 984	INIT_WORK(&spi->work, lantiq_ssc_bussy_work);
 985
 986	id = lantiq_ssc_readl(spi, LTQ_SPI_ID);
 987	spi->tx_fifo_size = (id >> LTQ_SPI_ID_TXFS_S) & hwcfg->fifo_size_mask;
 988	spi->rx_fifo_size = (id >> LTQ_SPI_ID_RXFS_S) & hwcfg->fifo_size_mask;
 989	supports_dma = (id & LTQ_SPI_ID_CFG_M) >> LTQ_SPI_ID_CFG_S;
 990	revision = id & LTQ_SPI_ID_REV_M;
 991
 992	lantiq_ssc_hw_init(spi);
 993
 994	dev_info(dev,
 995		"Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
 996		revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);
 997
 998	err = devm_spi_register_controller(dev, host);
 999	if (err) {
1000		dev_err(dev, "failed to register spi host\n");
1001		goto err_wq_destroy;
1002	}
1003
1004	return 0;
1005
1006err_wq_destroy:
1007	destroy_workqueue(spi->wq);
1008err_clk_put:
1009	clk_put(spi->fpi_clk);
1010err_host_put:
1011	spi_controller_put(host);
1012
1013	return err;
1014}
1015
1016static void lantiq_ssc_remove(struct platform_device *pdev)
1017{
1018	struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);
1019
1020	lantiq_ssc_writel(spi, 0, LTQ_SPI_IRNEN);
1021	lantiq_ssc_writel(spi, 0, LTQ_SPI_CLC);
1022	rx_fifo_flush(spi);
1023	tx_fifo_flush(spi);
1024	hw_enter_config_mode(spi);
1025
1026	destroy_workqueue(spi->wq);
1027	clk_put(spi->fpi_clk);
1028}
1029
1030static struct platform_driver lantiq_ssc_driver = {
1031	.probe = lantiq_ssc_probe,
1032	.remove = lantiq_ssc_remove,
1033	.driver = {
1034		.name = "spi-lantiq-ssc",
1035		.of_match_table = lantiq_ssc_match,
1036	},
1037};
1038module_platform_driver(lantiq_ssc_driver);
1039
1040MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
1041MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
1042MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
1043MODULE_LICENSE("GPL");
1044MODULE_ALIAS("platform:spi-lantiq-ssc");
drivers/spi/spi-lantiq-ssc.c (Linux v4.17)
 
  1/*
  2 * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
  3 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
  4 *
  5 * This program is free software; you can distribute it and/or modify it
  6 * under the terms of the GNU General Public License (Version 2) as
  7 * published by the Free Software Foundation.
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/module.h>
 12#include <linux/of_device.h>
 13#include <linux/clk.h>
 14#include <linux/io.h>
 15#include <linux/delay.h>
 16#include <linux/interrupt.h>
 17#include <linux/sched.h>
 18#include <linux/completion.h>
 19#include <linux/spinlock.h>
 20#include <linux/err.h>
 21#include <linux/gpio.h>
 22#include <linux/pm_runtime.h>
 23#include <linux/spi/spi.h>
 24
 25#ifdef CONFIG_LANTIQ
 26#include <lantiq_soc.h>
 27#endif
 28
 29#define LTQ_SPI_RX_IRQ_NAME	"spi_rx"
 30#define LTQ_SPI_TX_IRQ_NAME	"spi_tx"
 31#define LTQ_SPI_ERR_IRQ_NAME	"spi_err"
 32#define LTQ_SPI_FRM_IRQ_NAME	"spi_frm"
 33
 34#define LTQ_SPI_CLC		0x00
 35#define LTQ_SPI_PISEL		0x04
 36#define LTQ_SPI_ID		0x08
 37#define LTQ_SPI_CON		0x10
 38#define LTQ_SPI_STAT		0x14
 39#define LTQ_SPI_WHBSTATE	0x18
 40#define LTQ_SPI_TB		0x20
 41#define LTQ_SPI_RB		0x24
 42#define LTQ_SPI_RXFCON		0x30
 43#define LTQ_SPI_TXFCON		0x34
 44#define LTQ_SPI_FSTAT		0x38
 45#define LTQ_SPI_BRT		0x40
 46#define LTQ_SPI_BRSTAT		0x44
 47#define LTQ_SPI_SFCON		0x60
 48#define LTQ_SPI_SFSTAT		0x64
 49#define LTQ_SPI_GPOCON		0x70
 50#define LTQ_SPI_GPOSTAT		0x74
 51#define LTQ_SPI_FPGO		0x78
 52#define LTQ_SPI_RXREQ		0x80
 53#define LTQ_SPI_RXCNT		0x84
 54#define LTQ_SPI_DMACON		0xec
 55#define LTQ_SPI_IRNEN		0xf4
 56#define LTQ_SPI_IRNICR		0xf8
 57#define LTQ_SPI_IRNCR		0xfc
 58
 59#define LTQ_SPI_CLC_SMC_S	16	/* Clock divider for sleep mode */
 60#define LTQ_SPI_CLC_SMC_M	(0xFF << LTQ_SPI_CLC_SMC_S)
 61#define LTQ_SPI_CLC_RMC_S	8	/* Clock divider for normal run mode */
 62#define LTQ_SPI_CLC_RMC_M	(0xFF << LTQ_SPI_CLC_RMC_S)
 63#define LTQ_SPI_CLC_DISS	BIT(1)	/* Disable status bit */
 64#define LTQ_SPI_CLC_DISR	BIT(0)	/* Disable request bit */
 65
 66#define LTQ_SPI_ID_TXFS_S	24	/* Implemented TX FIFO size */
 67#define LTQ_SPI_ID_TXFS_M	(0x3F << LTQ_SPI_ID_TXFS_S)
 68#define LTQ_SPI_ID_RXFS_S	16	/* Implemented RX FIFO size */
 69#define LTQ_SPI_ID_RXFS_M	(0x3F << LTQ_SPI_ID_RXFS_S)
 70#define LTQ_SPI_ID_MOD_S	8	/* Module ID */
 71#define LTQ_SPI_ID_MOD_M	(0xff << LTQ_SPI_ID_MOD_S)
 72#define LTQ_SPI_ID_CFG_S	5	/* DMA interface support */
 73#define LTQ_SPI_ID_CFG_M	(1 << LTQ_SPI_ID_CFG_S)
 74#define LTQ_SPI_ID_REV_M	0x1F	/* Hardware revision number */
 75
 76#define LTQ_SPI_CON_BM_S	16	/* Data width selection */
 77#define LTQ_SPI_CON_BM_M	(0x1F << LTQ_SPI_CON_BM_S)
 78#define LTQ_SPI_CON_EM		BIT(24)	/* Echo mode */
 79#define LTQ_SPI_CON_IDLE	BIT(23)	/* Idle bit value */
 80#define LTQ_SPI_CON_ENBV	BIT(22)	/* Enable byte valid control */
 81#define LTQ_SPI_CON_RUEN	BIT(12)	/* Receive underflow error enable */
 82#define LTQ_SPI_CON_TUEN	BIT(11)	/* Transmit underflow error enable */
 83#define LTQ_SPI_CON_AEN		BIT(10)	/* Abort error enable */
 84#define LTQ_SPI_CON_REN		BIT(9)	/* Receive overflow error enable */
 85#define LTQ_SPI_CON_TEN		BIT(8)	/* Transmit overflow error enable */
 86#define LTQ_SPI_CON_LB		BIT(7)	/* Loopback control */
 87#define LTQ_SPI_CON_PO		BIT(6)	/* Clock polarity control */
 88#define LTQ_SPI_CON_PH		BIT(5)	/* Clock phase control */
 89#define LTQ_SPI_CON_HB		BIT(4)	/* Heading control */
 90#define LTQ_SPI_CON_RXOFF	BIT(1)	/* Switch receiver off */
 91#define LTQ_SPI_CON_TXOFF	BIT(0)	/* Switch transmitter off */
 92
 93#define LTQ_SPI_STAT_RXBV_S	28
 94#define LTQ_SPI_STAT_RXBV_M	(0x7 << LTQ_SPI_STAT_RXBV_S)
 95#define LTQ_SPI_STAT_BSY	BIT(13)	/* Busy flag */
 96#define LTQ_SPI_STAT_RUE	BIT(12)	/* Receive underflow error flag */
 97#define LTQ_SPI_STAT_TUE	BIT(11)	/* Transmit underflow error flag */
 98#define LTQ_SPI_STAT_AE		BIT(10)	/* Abort error flag */
 99#define LTQ_SPI_STAT_RE		BIT(9)	/* Receive error flag */
100#define LTQ_SPI_STAT_TE		BIT(8)	/* Transmit error flag */
101#define LTQ_SPI_STAT_ME		BIT(7)	/* Mode error flag */
102#define LTQ_SPI_STAT_MS		BIT(1)	/* Master/slave select bit */
103#define LTQ_SPI_STAT_EN		BIT(0)	/* Enable bit */
104#define LTQ_SPI_STAT_ERRORS	(LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
105				 LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
106				 LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)
107
108#define LTQ_SPI_WHBSTATE_SETTUE	BIT(15)	/* Set transmit underflow error flag */
109#define LTQ_SPI_WHBSTATE_SETAE	BIT(14)	/* Set abort error flag */
110#define LTQ_SPI_WHBSTATE_SETRE	BIT(13)	/* Set receive error flag */
111#define LTQ_SPI_WHBSTATE_SETTE	BIT(12)	/* Set transmit error flag */
112#define LTQ_SPI_WHBSTATE_CLRTUE	BIT(11)	/* Clear transmit underflow error flag */
113#define LTQ_SPI_WHBSTATE_CLRAE	BIT(10)	/* Clear abort error flag */
114#define LTQ_SPI_WHBSTATE_CLRRE	BIT(9)	/* Clear receive error flag */
115#define LTQ_SPI_WHBSTATE_CLRTE	BIT(8)	/* Clear transmit error flag */
116#define LTQ_SPI_WHBSTATE_SETME	BIT(7)	/* Set mode error flag */
117#define LTQ_SPI_WHBSTATE_CLRME	BIT(6)	/* Clear mode error flag */
118#define LTQ_SPI_WHBSTATE_SETRUE	BIT(5)	/* Set receive underflow error flag */
119#define LTQ_SPI_WHBSTATE_CLRRUE	BIT(4)	/* Clear receive underflow error flag */
120#define LTQ_SPI_WHBSTATE_SETMS	BIT(3)	/* Set master select bit */
121#define LTQ_SPI_WHBSTATE_CLRMS	BIT(2)	/* Clear master select bit */
122#define LTQ_SPI_WHBSTATE_SETEN	BIT(1)	/* Set enable bit (operational mode) */
123#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
124#define LTQ_SPI_WHBSTATE_CLR_ERRORS	(LTQ_SPI_WHBSTATE_CLRRUE | \
125					 LTQ_SPI_WHBSTATE_CLRME | \
126					 LTQ_SPI_WHBSTATE_CLRTE | \
127					 LTQ_SPI_WHBSTATE_CLRRE | \
128					 LTQ_SPI_WHBSTATE_CLRAE | \
129					 LTQ_SPI_WHBSTATE_CLRTUE)
130
131#define LTQ_SPI_RXFCON_RXFITL_S	8	/* FIFO interrupt trigger level */
132#define LTQ_SPI_RXFCON_RXFITL_M	(0x3F << LTQ_SPI_RXFCON_RXFITL_S)
133#define LTQ_SPI_RXFCON_RXFLU	BIT(1)	/* FIFO flush */
134#define LTQ_SPI_RXFCON_RXFEN	BIT(0)	/* FIFO enable */
135
136#define LTQ_SPI_TXFCON_TXFITL_S	8	/* FIFO interrupt trigger level */
137#define LTQ_SPI_TXFCON_TXFITL_M	(0x3F << LTQ_SPI_TXFCON_TXFITL_S)
138#define LTQ_SPI_TXFCON_TXFLU	BIT(1)	/* FIFO flush */
139#define LTQ_SPI_TXFCON_TXFEN	BIT(0)	/* FIFO enable */
140
141#define LTQ_SPI_FSTAT_RXFFL_S	0
142#define LTQ_SPI_FSTAT_RXFFL_M	(0x3f << LTQ_SPI_FSTAT_RXFFL_S)
143#define LTQ_SPI_FSTAT_TXFFL_S	8
144#define LTQ_SPI_FSTAT_TXFFL_M	(0x3f << LTQ_SPI_FSTAT_TXFFL_S)
145
146#define LTQ_SPI_GPOCON_ISCSBN_S	8
147#define LTQ_SPI_GPOCON_INVOUTN_S	0
148
149#define LTQ_SPI_FGPO_SETOUTN_S	8
150#define LTQ_SPI_FGPO_CLROUTN_S	0
151
152#define LTQ_SPI_RXREQ_RXCNT_M	0xFFFF	/* Receive count value */
153#define LTQ_SPI_RXCNT_TODO_M	0xFFFF	/* Receive to-do value */
154
155#define LTQ_SPI_IRNEN_TFI	BIT(4)	/* TX finished interrupt */
156#define LTQ_SPI_IRNEN_F		BIT(3)	/* Frame end interrupt request */
157#define LTQ_SPI_IRNEN_E		BIT(2)	/* Error end interrupt request */
158#define LTQ_SPI_IRNEN_T_XWAY	BIT(1)	/* Transmit end interrupt request */
159#define LTQ_SPI_IRNEN_R_XWAY	BIT(0)	/* Receive end interrupt request */
160#define LTQ_SPI_IRNEN_R_XRX	BIT(1)	/* Transmit end interrupt request */
161#define LTQ_SPI_IRNEN_T_XRX	BIT(0)	/* Receive end interrupt request */
162#define LTQ_SPI_IRNEN_ALL	0x1F
163
164struct lantiq_ssc_hwcfg {
165	unsigned int irnen_r;
166	unsigned int irnen_t;
167};
168
169struct lantiq_ssc_spi {
170	struct spi_master		*master;
171	struct device			*dev;
172	void __iomem			*regbase;
173	struct clk			*spi_clk;
174	struct clk			*fpi_clk;
175	const struct lantiq_ssc_hwcfg	*hwcfg;
176
177	spinlock_t			lock;
178	struct workqueue_struct		*wq;
179	struct work_struct		work;
180
181	const u8			*tx;
182	u8				*rx;
183	unsigned int			tx_todo;
184	unsigned int			rx_todo;
185	unsigned int			bits_per_word;
186	unsigned int			speed_hz;
187	unsigned int			tx_fifo_size;
188	unsigned int			rx_fifo_size;
189	unsigned int			base_cs;
190};
191
192static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
193{
194	return __raw_readl(spi->regbase + reg);
195}
196
197static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
198			      u32 reg)
199{
200	__raw_writel(val, spi->regbase + reg);
201}
202
203static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
204			     u32 set, u32 reg)
205{
206	u32 val = __raw_readl(spi->regbase + reg);
207
208	val &= ~clr;
209	val |= set;
210	__raw_writel(val, spi->regbase + reg);
211}
212
213static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
214{
215	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
216
217	return (fstat & LTQ_SPI_FSTAT_TXFFL_M) >> LTQ_SPI_FSTAT_TXFFL_S;
218}
219
220static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
221{
222	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
223
224	return fstat & LTQ_SPI_FSTAT_RXFFL_M;
225}
226
227static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
228{
229	return spi->tx_fifo_size - tx_fifo_level(spi);
230}
231
232static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
233{
234	u32 val = spi->rx_fifo_size << LTQ_SPI_RXFCON_RXFITL_S;
235
236	val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
237	lantiq_ssc_writel(spi, val, LTQ_SPI_RXFCON);
238}
239
240static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
241{
242	u32 val = 1 << LTQ_SPI_TXFCON_TXFITL_S;
243
244	val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
245	lantiq_ssc_writel(spi, val, LTQ_SPI_TXFCON);
246}
247
248static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
249{
250	lantiq_ssc_maskl(spi, 0, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
251}
252
253static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
254{
255	lantiq_ssc_maskl(spi, 0, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
256}
257
258static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
259{
260	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
261}
262
263static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
264{
265	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
266}
267
268static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
269			      unsigned int max_speed_hz)
270{
271	u32 spi_clk, brt;
272
273	/*
274	 * SPI module clock is derived from FPI bus clock dependent on
275	 * divider value in CLC.RMS which is always set to 1.
276	 *
277	 *                 f_SPI
278	 * baudrate = --------------
279	 *             2 * (BR + 1)
280	 */
281	spi_clk = clk_get_rate(spi->fpi_clk) / 2;
282
283	if (max_speed_hz > spi_clk)
284		brt = 0;
285	else
286		brt = spi_clk / max_speed_hz - 1;
287
288	if (brt > 0xFFFF)
289		brt = 0xFFFF;
290
291	dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
292		spi_clk, max_speed_hz, brt);
293
294	lantiq_ssc_writel(spi, brt, LTQ_SPI_BRT);
295}
296
297static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
298				   unsigned int bits_per_word)
299{
300	u32 bm;
301
302	/* CON.BM value = bits_per_word - 1 */
303	bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_S;
304
305	lantiq_ssc_maskl(spi, LTQ_SPI_CON_BM_M, bm, LTQ_SPI_CON);
306}
307
308static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
309				unsigned int mode)
310{
311	u32 con_set = 0, con_clr = 0;
312
313	/*
314	 * SPI mode mapping in CON register:
315	 * Mode CPOL CPHA CON.PO CON.PH
316	 *  0    0    0      0      1
317	 *  1    0    1      0      0
318	 *  2    1    0      1      1
319	 *  3    1    1      1      0
320	 */
321	if (mode & SPI_CPHA)
322		con_clr |= LTQ_SPI_CON_PH;
323	else
324		con_set |= LTQ_SPI_CON_PH;
325
326	if (mode & SPI_CPOL)
327		con_set |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
328	else
329		con_clr |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
330
331	/* Set heading control */
332	if (mode & SPI_LSB_FIRST)
333		con_clr |= LTQ_SPI_CON_HB;
334	else
335		con_set |= LTQ_SPI_CON_HB;
336
337	/* Set loopback mode */
338	if (mode & SPI_LOOP)
339		con_set |= LTQ_SPI_CON_LB;
340	else
341		con_clr |= LTQ_SPI_CON_LB;
342
343	lantiq_ssc_maskl(spi, con_clr, con_set, LTQ_SPI_CON);
344}
345
346static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
347{
348	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
349
350	/*
351	 * Set clock divider for run mode to 1 to
352	 * run at same frequency as FPI bus
353	 */
354	lantiq_ssc_writel(spi, 1 << LTQ_SPI_CLC_RMC_S, LTQ_SPI_CLC);
355
356	/* Put controller into config mode */
357	hw_enter_config_mode(spi);
358
359	/* Clear error flags */
360	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
361
362	/* Enable error checking, disable TX/RX */
363	lantiq_ssc_writel(spi, LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
364		LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN | LTQ_SPI_CON_TXOFF |
365		LTQ_SPI_CON_RXOFF, LTQ_SPI_CON);
366
367	/* Setup default SPI mode */
368	hw_setup_bits_per_word(spi, spi->bits_per_word);
369	hw_setup_clock_mode(spi, SPI_MODE_0);
370
371	/* Enable master mode and clear error flags */
372	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETMS |
373			       LTQ_SPI_WHBSTATE_CLR_ERRORS,
374			       LTQ_SPI_WHBSTATE);
375
376	/* Reset GPIO/CS registers */
377	lantiq_ssc_writel(spi, 0, LTQ_SPI_GPOCON);
378	lantiq_ssc_writel(spi, 0xFF00, LTQ_SPI_FPGO);
379
380	/* Enable and flush FIFOs */
381	rx_fifo_reset(spi);
382	tx_fifo_reset(spi);
383
384	/* Enable interrupts */
385	lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r |
386			  LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
387}
388
389static int lantiq_ssc_setup(struct spi_device *spidev)
390{
391	struct spi_master *master = spidev->master;
392	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
393	unsigned int cs = spidev->chip_select;
394	u32 gpocon;
395
396	/* GPIOs are used for CS */
397	if (gpio_is_valid(spidev->cs_gpio))
398		return 0;
399
400	dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
401
402	if (cs < spi->base_cs) {
403		dev_err(spi->dev,
404			"chipselect %i too small (min %i)\n", cs, spi->base_cs);
405		return -EINVAL;
406	}
407
408	/* set GPO pin to CS mode */
409	gpocon = 1 << ((cs - spi->base_cs) + LTQ_SPI_GPOCON_ISCSBN_S);
410
411	/* invert GPO pin */
412	if (spidev->mode & SPI_CS_HIGH)
413		gpocon |= 1 << (cs - spi->base_cs);
414
415	lantiq_ssc_maskl(spi, 0, gpocon, LTQ_SPI_GPOCON);
416
417	return 0;
418}
419
420static int lantiq_ssc_prepare_message(struct spi_master *master,
421				      struct spi_message *message)
422{
423	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
424
425	hw_enter_config_mode(spi);
426	hw_setup_clock_mode(spi, message->spi->mode);
427	hw_enter_active_mode(spi);
428
429	return 0;
430}
431
432static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
433			      struct spi_device *spidev, struct spi_transfer *t)
434{
435	unsigned int speed_hz = t->speed_hz;
436	unsigned int bits_per_word = t->bits_per_word;
437	u32 con;
438
439	if (bits_per_word != spi->bits_per_word ||
440		speed_hz != spi->speed_hz) {
441		hw_enter_config_mode(spi);
442		hw_setup_speed_hz(spi, speed_hz);
443		hw_setup_bits_per_word(spi, bits_per_word);
444		hw_enter_active_mode(spi);
445
446		spi->speed_hz = speed_hz;
447		spi->bits_per_word = bits_per_word;
448	}
449
450	/* Configure transmitter and receiver */
451	con = lantiq_ssc_readl(spi, LTQ_SPI_CON);
452	if (t->tx_buf)
453		con &= ~LTQ_SPI_CON_TXOFF;
454	else
455		con |= LTQ_SPI_CON_TXOFF;
456
457	if (t->rx_buf)
458		con &= ~LTQ_SPI_CON_RXOFF;
459	else
460		con |= LTQ_SPI_CON_RXOFF;
461
462	lantiq_ssc_writel(spi, con, LTQ_SPI_CON);
463}
464
465static int lantiq_ssc_unprepare_message(struct spi_master *master,
466					struct spi_message *message)
467{
468	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
469
470	flush_workqueue(spi->wq);
471
472	/* Disable transmitter and receiver while idle */
473	lantiq_ssc_maskl(spi, 0, LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF,
474			 LTQ_SPI_CON);
475
476	return 0;
477}
478
479static void tx_fifo_write(struct lantiq_ssc_spi *spi)
480{
481	const u8 *tx8;
482	const u16 *tx16;
483	const u32 *tx32;
484	u32 data;
485	unsigned int tx_free = tx_fifo_free(spi);
486
487	while (spi->tx_todo && tx_free) {
488		switch (spi->bits_per_word) {
489		case 2 ... 8:
490			tx8 = spi->tx;
491			data = *tx8;
492			spi->tx_todo--;
493			spi->tx++;
494			break;
495		case 16:
496			tx16 = (u16 *) spi->tx;
497			data = *tx16;
498			spi->tx_todo -= 2;
499			spi->tx += 2;
500			break;
501		case 32:
502			tx32 = (u32 *) spi->tx;
503			data = *tx32;
504			spi->tx_todo -= 4;
505			spi->tx += 4;
506			break;
507		default:
508			WARN_ON(1);
509			data = 0;
510			break;
511		}
512
513		lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
514		tx_free--;
515	}
516}
517
518static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
519{
520	u8 *rx8;
521	u16 *rx16;
522	u32 *rx32;
523	u32 data;
524	unsigned int rx_fill = rx_fifo_level(spi);
525
526	while (rx_fill) {
527		data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
528
529		switch (spi->bits_per_word) {
530		case 2 ... 8:
531			rx8 = spi->rx;
532			*rx8 = data;
533			spi->rx_todo--;
534			spi->rx++;
535			break;
536		case 16:
537			rx16 = (u16 *) spi->rx;
538			*rx16 = data;
539			spi->rx_todo -= 2;
540			spi->rx += 2;
541			break;
542		case 32:
543			rx32 = (u32 *) spi->rx;
544			*rx32 = data;
545			spi->rx_todo -= 4;
546			spi->rx += 4;
547			break;
548		default:
549			WARN_ON(1);
550			break;
551		}
552
553		rx_fill--;
554	}
555}
556
557static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
558{
559	u32 data, *rx32;
560	u8 *rx8;
561	unsigned int rxbv, shift;
562	unsigned int rx_fill = rx_fifo_level(spi);
563
564	/*
565	 * In RX-only mode the bits per word value is ignored by HW. A value
566	 * of 32 is used instead. Thus all 4 bytes per FIFO must be read.
567	 * If remaining RX bytes are less than 4, the FIFO must be read
568	 * differently. The amount of received and valid bytes is indicated
569	 * by STAT.RXBV register value.
570	 */
571	while (rx_fill) {
572		if (spi->rx_todo < 4)  {
573			rxbv = (lantiq_ssc_readl(spi, LTQ_SPI_STAT) &
574				LTQ_SPI_STAT_RXBV_M) >> LTQ_SPI_STAT_RXBV_S;
575			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
576
577			shift = (rxbv - 1) * 8;
578			rx8 = spi->rx;
579
580			while (rxbv) {
581				*rx8++ = (data >> shift) & 0xFF;
582				rxbv--;
583				shift -= 8;
584				spi->rx_todo--;
585				spi->rx++;
586			}
587		} else {
588			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
589			rx32 = (u32 *) spi->rx;
590
591			*rx32++ = data;
592			spi->rx_todo -= 4;
593			spi->rx += 4;
594		}
595		rx_fill--;
596	}
597}
598
599static void rx_request(struct lantiq_ssc_spi *spi)
600{
601	unsigned int rxreq, rxreq_max;
602
603	/*
604	 * To avoid receive overflows at high clocks it is better to request
605	 * only the amount of bytes that fits into all FIFOs. This value
606	 * depends on the FIFO size implemented in hardware.
607	 */
608	rxreq = spi->rx_todo;
609	rxreq_max = spi->rx_fifo_size * 4;
610	if (rxreq > rxreq_max)
611		rxreq = rxreq_max;
612
613	lantiq_ssc_writel(spi, rxreq, LTQ_SPI_RXREQ);
614}
615
616static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
617{
618	struct lantiq_ssc_spi *spi = data;
619
620	if (spi->tx) {
621		if (spi->rx && spi->rx_todo)
622			rx_fifo_read_full_duplex(spi);
623
624		if (spi->tx_todo)
625			tx_fifo_write(spi);
626		else if (!tx_fifo_level(spi))
627			goto completed;
628	} else if (spi->rx) {
629		if (spi->rx_todo) {
630			rx_fifo_read_half_duplex(spi);
631
632			if (spi->rx_todo)
633				rx_request(spi);
634			else
635				goto completed;
636		} else {
637			goto completed;
638		}
639	}
640
641	return IRQ_HANDLED;
642
643completed:
644	queue_work(spi->wq, &spi->work);
645
646	return IRQ_HANDLED;
647}
648
649static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
650{
651	struct lantiq_ssc_spi *spi = data;
652	u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
653
654	if (!(stat & LTQ_SPI_STAT_ERRORS))
655		return IRQ_NONE;
656
657	if (stat & LTQ_SPI_STAT_RUE)
658		dev_err(spi->dev, "receive underflow error\n");
659	if (stat & LTQ_SPI_STAT_TUE)
660		dev_err(spi->dev, "transmit underflow error\n");
661	if (stat & LTQ_SPI_STAT_AE)
662		dev_err(spi->dev, "abort error\n");
663	if (stat & LTQ_SPI_STAT_RE)
664		dev_err(spi->dev, "receive overflow error\n");
665	if (stat & LTQ_SPI_STAT_TE)
666		dev_err(spi->dev, "transmit overflow error\n");
667	if (stat & LTQ_SPI_STAT_ME)
668		dev_err(spi->dev, "mode error\n");
669
670	/* Clear error flags */
671	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
672
673	/* set bad status so it can be retried */
674	if (spi->master->cur_msg)
675		spi->master->cur_msg->status = -EIO;
676	queue_work(spi->wq, &spi->work);
677
678	return IRQ_HANDLED;
679}
680
681static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
682			  struct spi_transfer *t)
683{
684	unsigned long flags;
685
686	spin_lock_irqsave(&spi->lock, flags);
687
688	spi->tx = t->tx_buf;
689	spi->rx = t->rx_buf;
690
691	if (t->tx_buf) {
692		spi->tx_todo = t->len;
693
694		/* initially fill TX FIFO */
695		tx_fifo_write(spi);
696	}
697
698	if (spi->rx) {
699		spi->rx_todo = t->len;
700
701		/* start shift clock in RX-only mode */
702		if (!spi->tx)
703			rx_request(spi);
704	}
705
706	spin_unlock_irqrestore(&spi->lock, flags);
707
708	return t->len;
709}
710
711/*
712 * The driver only gets an interrupt when the FIFO is empty, but there
713 * is an additional shift register from which the data is written to
714 * the wire. We get the last interrupt when the controller starts to
715 * write the last word to the wire, not when it is finished. Do busy
716 * waiting till it finishes.
717 */
718static void lantiq_ssc_bussy_work(struct work_struct *work)
719{
720	struct lantiq_ssc_spi *spi;
721	unsigned long long timeout = 8LL * 1000LL;
722	unsigned long end;
723
724	spi = container_of(work, typeof(*spi), work);
725
726	do_div(timeout, spi->speed_hz);
727	timeout += timeout + 100; /* some tolerance */
728
729	end = jiffies + msecs_to_jiffies(timeout);
730	do {
731		u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
732
733		if (!(stat & LTQ_SPI_STAT_BSY)) {
734			spi_finalize_current_transfer(spi->master);
735			return;
736		}
737
738		cond_resched();
739	} while (!time_after_eq(jiffies, end));
740
741	if (spi->master->cur_msg)
742		spi->master->cur_msg->status = -EIO;
743	spi_finalize_current_transfer(spi->master);
744}
745
746static void lantiq_ssc_handle_err(struct spi_master *master,
747				  struct spi_message *message)
748{
749	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
750
751	/* flush FIFOs on timeout */
752	rx_fifo_flush(spi);
753	tx_fifo_flush(spi);
754}
755
756static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
757{
758	struct lantiq_ssc_spi *spi = spi_master_get_devdata(spidev->master);
759	unsigned int cs = spidev->chip_select;
760	u32 fgpo;
761
762	if (!!(spidev->mode & SPI_CS_HIGH) == enable)
763		fgpo = (1 << (cs - spi->base_cs));
764	else
765		fgpo = (1 << (cs - spi->base_cs + LTQ_SPI_FGPO_SETOUTN_S));
766
767	lantiq_ssc_writel(spi, fgpo, LTQ_SPI_FPGO);
768}
769
770static int lantiq_ssc_transfer_one(struct spi_master *master,
771				   struct spi_device *spidev,
772				   struct spi_transfer *t)
773{
774	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
775
776	hw_setup_transfer(spi, spidev, t);
777
778	return transfer_start(spi, spidev, t);
779}
780
781static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
782	.irnen_r = LTQ_SPI_IRNEN_R_XWAY,
783	.irnen_t = LTQ_SPI_IRNEN_T_XWAY,
784};
785
786static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
787	.irnen_r = LTQ_SPI_IRNEN_R_XRX,
788	.irnen_t = LTQ_SPI_IRNEN_T_XRX,
789};
790
791static const struct of_device_id lantiq_ssc_match[] = {
792	{ .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
793	{ .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
794	{ .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
795	{},
796};
797MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
798
799static int lantiq_ssc_probe(struct platform_device *pdev)
800{
801	struct device *dev = &pdev->dev;
802	struct spi_master *master;
803	struct resource *res;
804	struct lantiq_ssc_spi *spi;
805	const struct lantiq_ssc_hwcfg *hwcfg;
806	const struct of_device_id *match;
807	int err, rx_irq, tx_irq, err_irq;
808	u32 id, supports_dma, revision;
809	unsigned int num_cs;
810
811	match = of_match_device(lantiq_ssc_match, dev);
812	if (!match) {
813		dev_err(dev, "no device match\n");
814		return -EINVAL;
815	}
816	hwcfg = match->data;
817
818	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
819	if (!res) {
820		dev_err(dev, "failed to get resources\n");
821		return -ENXIO;
822	}
823
824	rx_irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
825	if (rx_irq < 0) {
826		dev_err(dev, "failed to get %s\n", LTQ_SPI_RX_IRQ_NAME);
827		return -ENXIO;
828	}
829
830	tx_irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
831	if (tx_irq < 0) {
832		dev_err(dev, "failed to get %s\n", LTQ_SPI_TX_IRQ_NAME);
833		return -ENXIO;
834	}
835
836	err_irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
837	if (err_irq < 0) {
838		dev_err(dev, "failed to get %s\n", LTQ_SPI_ERR_IRQ_NAME);
839		return -ENXIO;
840	}
841
842	master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
843	if (!master)
844		return -ENOMEM;
845
846	spi = spi_master_get_devdata(master);
847	spi->master = master;
848	spi->dev = dev;
849	spi->hwcfg = hwcfg;
850	platform_set_drvdata(pdev, spi);
851
852	spi->regbase = devm_ioremap_resource(dev, res);
853	if (IS_ERR(spi->regbase)) {
854		err = PTR_ERR(spi->regbase);
855		goto err_master_put;
856	}
857
858	err = devm_request_irq(dev, rx_irq, lantiq_ssc_xmit_interrupt,
859			       0, LTQ_SPI_RX_IRQ_NAME, spi);
860	if (err)
861		goto err_master_put;
862
863	err = devm_request_irq(dev, tx_irq, lantiq_ssc_xmit_interrupt,
864			       0, LTQ_SPI_TX_IRQ_NAME, spi);
865	if (err)
866		goto err_master_put;
867
868	err = devm_request_irq(dev, err_irq, lantiq_ssc_err_interrupt,
869			       0, LTQ_SPI_ERR_IRQ_NAME, spi);
870	if (err)
871		goto err_master_put;
872
873	spi->spi_clk = devm_clk_get(dev, "gate");
874	if (IS_ERR(spi->spi_clk)) {
875		err = PTR_ERR(spi->spi_clk);
876		goto err_master_put;
877	}
878	err = clk_prepare_enable(spi->spi_clk);
879	if (err)
880		goto err_master_put;
881
882	/*
883	 * Use the old clk_get_fpi() function on Lantiq platform, till it
884	 * supports common clk.
885	 */
886#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
887	spi->fpi_clk = clk_get_fpi();
888#else
889	spi->fpi_clk = clk_get(dev, "freq");
890#endif
891	if (IS_ERR(spi->fpi_clk)) {
892		err = PTR_ERR(spi->fpi_clk);
893		goto err_clk_disable;
894	}
895
896	num_cs = 8;
897	of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
898
899	spi->base_cs = 1;
900	of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);
901
902	spin_lock_init(&spi->lock);
903	spi->bits_per_word = 8;
904	spi->speed_hz = 0;
905
906	master->dev.of_node = pdev->dev.of_node;
907	master->num_chipselect = num_cs;
908	master->setup = lantiq_ssc_setup;
909	master->set_cs = lantiq_ssc_set_cs;
910	master->handle_err = lantiq_ssc_handle_err;
911	master->prepare_message = lantiq_ssc_prepare_message;
912	master->unprepare_message = lantiq_ssc_unprepare_message;
913	master->transfer_one = lantiq_ssc_transfer_one;
914	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
915				SPI_LOOP;
916	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
917				     SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
918
919	spi->wq = alloc_ordered_workqueue(dev_name(dev), 0);
920	if (!spi->wq) {
921		err = -ENOMEM;
922		goto err_clk_put;
923	}
924	INIT_WORK(&spi->work, lantiq_ssc_bussy_work);
925
926	id = lantiq_ssc_readl(spi, LTQ_SPI_ID);
927	spi->tx_fifo_size = (id & LTQ_SPI_ID_TXFS_M) >> LTQ_SPI_ID_TXFS_S;
928	spi->rx_fifo_size = (id & LTQ_SPI_ID_RXFS_M) >> LTQ_SPI_ID_RXFS_S;
929	supports_dma = (id & LTQ_SPI_ID_CFG_M) >> LTQ_SPI_ID_CFG_S;
930	revision = id & LTQ_SPI_ID_REV_M;
931
932	lantiq_ssc_hw_init(spi);
933
934	dev_info(dev,
935		"Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
936		revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);
937
938	err = devm_spi_register_master(dev, master);
939	if (err) {
940		dev_err(dev, "failed to register spi_master\n");
941		goto err_wq_destroy;
942	}
943
944	return 0;
945
946err_wq_destroy:
947	destroy_workqueue(spi->wq);
948err_clk_put:
949	clk_put(spi->fpi_clk);
950err_clk_disable:
951	clk_disable_unprepare(spi->spi_clk);
952err_master_put:
953	spi_master_put(master);
954
955	return err;
956}
957
958static int lantiq_ssc_remove(struct platform_device *pdev)
959{
960	struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);
961
962	lantiq_ssc_writel(spi, 0, LTQ_SPI_IRNEN);
963	lantiq_ssc_writel(spi, 0, LTQ_SPI_CLC);
964	rx_fifo_flush(spi);
965	tx_fifo_flush(spi);
966	hw_enter_config_mode(spi);
967
968	destroy_workqueue(spi->wq);
969	clk_disable_unprepare(spi->spi_clk);
970	clk_put(spi->fpi_clk);
971
972	return 0;
973}
974
975static struct platform_driver lantiq_ssc_driver = {
976	.probe = lantiq_ssc_probe,
977	.remove = lantiq_ssc_remove,
978	.driver = {
979		.name = "spi-lantiq-ssc",
980		.of_match_table = lantiq_ssc_match,
981	},
982};
983module_platform_driver(lantiq_ssc_driver);
984
985MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
986MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
987MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
988MODULE_LICENSE("GPL");
989MODULE_ALIAS("platform:spi-lantiq-ssc");