/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 */

/*
 * The MMCIF driver is now processing MMC requests asynchronously, according
 * to the Linux MMC API requirement.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In case of the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback, if DMA is used, a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing is
 * finished. In case processing has to continue, i.e., if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine into the
 * bottom half waiting state.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum sh_mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

enum sh_mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};
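
/*
 * Typical wait_for progression for a multi-block write with an automatic
 * CMD12 stop (a sketch derived from the handlers below, not an exhaustive
 * list of transitions):
 *
 *   MMCIF_WAIT_FOR_REQUEST -> MMCIF_WAIT_FOR_CMD
 *     -> MMCIF_WAIT_FOR_MWRITE (once per block)
 *     -> MMCIF_WAIT_FOR_STOP -> MMCIF_WAIT_FOR_REQUEST
 */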

/*
 * Per-SoC differences
 */
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum sh_mmcif_state state;
	enum sh_mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;
	u32 clkdiv_map;         /* see CE_CLK_CTRL::CLKDIV */

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

static const struct of_device_id sh_mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);

#define sh_mmcif_host_to_dev(host) (&host->pd->dev)

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}
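
/*
 * Note: these register helpers are plain, non-atomic read-modify-write
 * accesses. The driver relies on its own serialization (host->lock, the
 * request state machine and the threaded IRQ handler) rather than on
 * these helpers being atomic.
 */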

static void sh_mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(dev)))
		return;

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
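
/*
 * In both sh_mmcif_start_dma_rx() and sh_mmcif_start_dma_tx() above, a
 * failed mapping or descriptor preparation releases *both* channels and
 * leaves dma_active false, so sh_mmcif_end_cmd() quietly falls back to
 * PIO for this and for all subsequent requests.
 */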

static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	if (slave_id <= 0)
		return NULL;

	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}

static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
		struct sh_mmcif_plat_data *pdata = dev->platform_data;

		host->chan_tx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_tx);
		host->chan_rx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_rx);
	} else {
		host->chan_tx = dma_request_slave_channel(dev, "tx");
		host->chan_rx = dma_request_slave_channel(dev, "rx");
	}
	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
		host->chan_rx);

	if (!host->chan_tx || !host->chan_rx ||
	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
		goto error;

	return;

error:
	if (host->chan_tx)
		dma_release_channel(host->chan_tx);
	if (host->chan_rx)
		dma_release_channel(host->chan_rx);
	host->chan_tx = host->chan_rx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;
	unsigned int current_clk = clk_get_rate(host->clk);
	unsigned int clkdiv;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (host->clkdiv_map) {
		unsigned int freq, best_freq, myclk, div, diff_min, diff;
		int i;

		clkdiv = 0;
		diff_min = ~0;
		best_freq = 0;
		for (i = 31; i >= 0; i--) {
			if (!((1 << i) & host->clkdiv_map))
				continue;

			/*
			 * clk = parent_freq / div
			 * -> parent_freq = clk x div
			 */

			div = 1 << (i + 1);
			freq = clk_round_rate(host->clk, clk * div);
			myclk = freq / div;
			diff = (myclk > clk) ? myclk - clk : clk - myclk;

			if (diff <= diff_min) {
				best_freq = freq;
				clkdiv = i;
				diff_min = diff;
			}
		}

		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
			(best_freq / (1 << (clkdiv + 1))), clk,
			best_freq, clkdiv);

		clk_set_rate(host->clk, best_freq);
		clkdiv = clkdiv << 16;
	} else if (sup_pclk && clk == current_clk) {
		clkdiv = CLK_SUP_PCLK;
	} else {
		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
	}

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
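
/*
 * Worked example for the last branch in sh_mmcif_clock_control() (an
 * illustrative calculation, not taken from the datasheet): with
 * current_clk = 104 MHz and a requested clk of 26 MHz,
 * DIV_ROUND_UP(104, 26) - 1 = 3 and fls(3) - 1 = 1, so a CLKDIV field
 * value of 1 is programmed (shifted into place by << 16), selecting a
 * divider of 2^(1+1) = 4 and thus a 26 MHz card clock.
 */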

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}
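
/*
 * sh_mmcif_next_block() returns true while blocks remain in the
 * scatterlist, advancing pio_ptr either within the current segment or to
 * the start of the next one; the multi-block handlers below stop
 * re-arming the buffer interrupts once it returns false.
 */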

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R1B:
		tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(dev, "Unsupported response type.\n");
		break;
	}

	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * MMC core will only set this timing if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, will have to set it
			 * in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}
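
/*
 * Example of a word composed by sh_mmcif_set_cmd() (illustrative,
 * assuming an 8-bit bus and SDR timing): for CMD18
 * (MMC_READ_MULTIPLE_BLOCK) the function returns
 *
 *   (18 << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT | CMD_SET_DATW_8 |
 *   CMD_SET_CMLTE | CMD_SET_CMD12EN
 *
 * i.e. an R1 response, data transfer enabled, multi-block transfer with
 * automatic CMD12 issue; CMD_SET_DWEN stays clear because it is a read.
 */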

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc;
	u32 mask = 0;
	unsigned long flags;

	if (cmd->flags & MMC_RSP_BUSY)
		mask = MASK_START_CMD | MASK_MRBSYE;
	else
		mask = MASK_START_CMD | MASK_MCRSPE;

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	if (host->mmc->f_max) {
		unsigned int f_max, f_min = 0, f_min_old;

		f_max = host->mmc->f_max;
		for (f_min_old = f_max; f_min_old > 2;) {
			f_min = clk_round_rate(host->clk, f_min_old / 2);
			if (f_min == f_min_old)
				break;
			f_min_old = f_min;
		}

		/*
		 * This driver assumes this SoC is R-Car Gen2 or later
		 */
		host->clkdiv_map = 0x3ff;

		host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
		host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
	} else {
		unsigned int clk = clk_get_rate(host->clk);

		host->mmc->f_max = clk / 2;
		host->mmc->f_min = clk / 512;
	}

	dev_dbg(dev, "clk max/min = %d/%d\n",
		host->mmc->f_max, host->mmc->f_min);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		if (!host->power) {
			clk_prepare_enable(host->clk);
			pm_runtime_get_sync(dev);
			sh_mmcif_sync_reset(host);
			sh_mmcif_request_dma(host);
			host->power = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (host->power) {
			sh_mmcif_clock_control(host, 0);
			sh_mmcif_release_dma(host);
			pm_runtime_put(dev);
			clk_disable_unprepare(host->clk);
			host->power = false;
		}
		break;
	case MMC_POWER_ON:
		sh_mmcif_clock_control(host, ios->clock);
		break;
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static const struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= mmc_gpio_get_cd,
};

static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	struct device *dev = sh_mmcif_host_to_dev(host);
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from the DMA callback and from an error
	 * IRQ, so it has to be reset here, before setting .dma_active
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
	}

	return false;
}
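
/*
 * sh_mmcif_end_cmd() runs in the threaded IRQ handler and may therefore
 * sleep while waiting for the DMA completion. The PIO path instead
 * returns true (when the transfer was set up successfully) and leaves
 * the state machine waiting for the next buffer interrupt.
 */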

static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	bool wait = false;
	unsigned long flags;
	int wait_work;

	spin_lock_irqsave(&host->lock, flags);
	wait_work = host->wait_for;
	spin_unlock_irqrestore(&host->lock, flags);

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true if processing continues, and false if the
	 * request has to be completed - successfully or not
	 */
	switch (wait_work) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}

static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			sh_mmcif_dma_complete(host);
	} else {
		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}
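
/*
 * Split-interrupt scheme: the hard handler above only acknowledges and
 * masks the raised status bits and records errors; the actual request
 * processing happens in the sh_mmcif_irqt() thread, woken via
 * IRQ_WAKE_THREAD. While a DMA transfer is in flight, an error instead
 * completes host->dma_complete directly, waking the thread already
 * sleeping in sh_mmcif_end_cmd().
 */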

static void sh_mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct device *dev = &pdev->dev;
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	struct resource *res;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0) {
		dev_err(dev, "Get irq error\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= msecs_to_jiffies(10000);
	host->ccs_enable = true;
	host->clk_ctrl2_enable = false;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
	mmc->max_busy_timeout = 10000;

	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;
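
	/*
	 * With max_segs = 32 the request size above works out to 32 pages,
	 * i.e. 128 KiB with 4 KiB pages (an observation, not a hardware
	 * limit quoted from the datasheet).
	 */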

	platform_set_drvdata(pdev, host);

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(dev, "cannot get clock: %d\n", ret);
		goto err_host;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret < 0)
		goto err_host;

	sh_mmcif_clk_setup(host);

	pm_runtime_enable(dev);
	host->power = false;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(dev, 100);

	dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->clk) / 1000000UL);

	pm_runtime_put(dev);
	clk_disable_unprepare(host->clk);
	return ret;

err_clk:
	clk_disable_unprepare(host->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}

static int sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->clk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	pm_runtime_get_sync(dev);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
	pm_runtime_put(dev);

	return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
		.of_match_table = sh_mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * MMCIF eMMC driver.
   4 *
   5 * Copyright (C) 2010 Renesas Solutions Corp.
   6 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 
 
 
 
   7 */
   8
   9/*
  10 * The MMCIF driver is now processing MMC requests asynchronously, according
  11 * to the Linux MMC API requirement.
  12 *
  13 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
  14 * data, and optional stop. To achieve asynchronous processing each of these
  15 * stages is split into two halves: a top and a bottom half. The top half
  16 * initialises the hardware, installs a timeout handler to handle completion
  17 * timeouts, and returns. In case of the command stage this immediately returns
  18 * control to the caller, leaving all further processing to run asynchronously.
  19 * All further request processing is performed by the bottom halves.
  20 *
  21 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
  22 * thread, a DMA completion callback, if DMA is used, a timeout work, and
  23 * request- and stage-specific handler methods.
  24 *
  25 * Each bottom half run begins with either a hardware interrupt, a DMA callback
  26 * invocation, or a timeout work run. In case of an error or a successful
  27 * processing completion, the MMC core is informed and the request processing is
  28 * finished. In case processing has to continue, i.e., if data has to be read
  29 * from or written to the card, or if a stop command has to be sent, the next
  30 * top half is called, which performs the necessary hardware handling and
  31 * reschedules the timeout work. This returns the driver state machine into the
  32 * bottom half waiting state.
  33 */
  34
  35#include <linux/bitops.h>
  36#include <linux/clk.h>
  37#include <linux/completion.h>
  38#include <linux/delay.h>
  39#include <linux/dma-mapping.h>
  40#include <linux/dmaengine.h>
  41#include <linux/mmc/card.h>
  42#include <linux/mmc/core.h>
  43#include <linux/mmc/host.h>
  44#include <linux/mmc/mmc.h>
  45#include <linux/mmc/sdio.h>
 
  46#include <linux/mmc/slot-gpio.h>
  47#include <linux/mod_devicetable.h>
  48#include <linux/mutex.h>
 
  49#include <linux/pagemap.h>
  50#include <linux/platform_data/sh_mmcif.h>
  51#include <linux/platform_device.h>
  52#include <linux/pm_qos.h>
  53#include <linux/pm_runtime.h>
  54#include <linux/sh_dma.h>
  55#include <linux/spinlock.h>
  56#include <linux/module.h>
  57
  58#define DRIVER_NAME	"sh_mmcif"
  59
  60/* CE_CMD_SET */
  61#define CMD_MASK		0x3f000000
  62#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
  63#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
  64#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
  65#define CMD_SET_RBSY		(1 << 21) /* R1b */
  66#define CMD_SET_CCSEN		(1 << 20)
  67#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
  68#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
  69#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
  70#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
  71#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
  72#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
  73#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
  74#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check*/
  75#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check*/
  76#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check*/
  77#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check*/
  78#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
  79#define CMD_SET_TBIT		(1 << 7) /* 1: tran mission bit "Low" */
  80#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
  81#define CMD_SET_CCSH		(1 << 5)
  82#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
  83#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
  84#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
  85#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */
  86
  87/* CE_CMD_CTRL */
  88#define CMD_CTRL_BREAK		(1 << 0)
  89
  90/* CE_BLOCK_SET */
  91#define BLOCK_SIZE_MASK		0x0000ffff
  92
  93/* CE_INT */
  94#define INT_CCSDE		(1 << 29)
  95#define INT_CMD12DRE		(1 << 26)
  96#define INT_CMD12RBE		(1 << 25)
  97#define INT_CMD12CRE		(1 << 24)
  98#define INT_DTRANE		(1 << 23)
  99#define INT_BUFRE		(1 << 22)
 100#define INT_BUFWEN		(1 << 21)
 101#define INT_BUFREN		(1 << 20)
 102#define INT_CCSRCV		(1 << 19)
 103#define INT_RBSYE		(1 << 17)
 104#define INT_CRSPE		(1 << 16)
 105#define INT_CMDVIO		(1 << 15)
 106#define INT_BUFVIO		(1 << 14)
 107#define INT_WDATERR		(1 << 11)
 108#define INT_RDATERR		(1 << 10)
 109#define INT_RIDXERR		(1 << 9)
 110#define INT_RSPERR		(1 << 8)
 111#define INT_CCSTO		(1 << 5)
 112#define INT_CRCSTO		(1 << 4)
 113#define INT_WDATTO		(1 << 3)
 114#define INT_RDATTO		(1 << 2)
 115#define INT_RBSYTO		(1 << 1)
 116#define INT_RSPTO		(1 << 0)
 117#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
 118				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
 119				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
 120				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
 121
 122#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
 123				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
 124				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
 125
 126#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)
 127
 128/* CE_INT_MASK */
 129#define MASK_ALL		0x00000000
 130#define MASK_MCCSDE		(1 << 29)
 131#define MASK_MCMD12DRE		(1 << 26)
 132#define MASK_MCMD12RBE		(1 << 25)
 133#define MASK_MCMD12CRE		(1 << 24)
 134#define MASK_MDTRANE		(1 << 23)
 135#define MASK_MBUFRE		(1 << 22)
 136#define MASK_MBUFWEN		(1 << 21)
 137#define MASK_MBUFREN		(1 << 20)
 138#define MASK_MCCSRCV		(1 << 19)
 139#define MASK_MRBSYE		(1 << 17)
 140#define MASK_MCRSPE		(1 << 16)
 141#define MASK_MCMDVIO		(1 << 15)
 142#define MASK_MBUFVIO		(1 << 14)
 143#define MASK_MWDATERR		(1 << 11)
 144#define MASK_MRDATERR		(1 << 10)
 145#define MASK_MRIDXERR		(1 << 9)
 146#define MASK_MRSPERR		(1 << 8)
 147#define MASK_MCCSTO		(1 << 5)
 148#define MASK_MCRCSTO		(1 << 4)
 149#define MASK_MWDATTO		(1 << 3)
 150#define MASK_MRDATTO		(1 << 2)
 151#define MASK_MRBSYTO		(1 << 1)
 152#define MASK_MRSPTO		(1 << 0)
 153
 154#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
 155				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
 156				 MASK_MCRCSTO | MASK_MWDATTO | \
 157				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
 158
 159#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
 160				 MASK_MBUFREN | MASK_MBUFWEN |			\
 161				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
 162				 MASK_MCMD12RBE | MASK_MCMD12CRE)
 163
 164/* CE_HOST_STS1 */
 165#define STS1_CMDSEQ		(1 << 31)
 166
 167/* CE_HOST_STS2 */
 168#define STS2_CRCSTE		(1 << 31)
 169#define STS2_CRC16E		(1 << 30)
 170#define STS2_AC12CRCE		(1 << 29)
 171#define STS2_RSPCRC7E		(1 << 28)
 172#define STS2_CRCSTEBE		(1 << 27)
 173#define STS2_RDATEBE		(1 << 26)
 174#define STS2_AC12REBE		(1 << 25)
 175#define STS2_RSPEBE		(1 << 24)
 176#define STS2_AC12IDXE		(1 << 23)
 177#define STS2_RSPIDXE		(1 << 22)
 178#define STS2_CCSTO		(1 << 15)
 179#define STS2_RDATTO		(1 << 14)
 180#define STS2_DATBSYTO		(1 << 13)
 181#define STS2_CRCSTTO		(1 << 12)
 182#define STS2_AC12BSYTO		(1 << 11)
 183#define STS2_RSPBSYTO		(1 << 10)
 184#define STS2_AC12RSPTO		(1 << 9)
 185#define STS2_RSPTO		(1 << 8)
 186#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
 187				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
 188#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
 189				 STS2_DATBSYTO | STS2_CRCSTTO |		\
 190				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
 191				 STS2_AC12RSPTO | STS2_RSPTO)
 192
 193#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
 194#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
 195#define CLKDEV_INIT		400000   /* 400 kHz */
 196
 197enum sh_mmcif_state {
 198	STATE_IDLE,
 199	STATE_REQUEST,
 200	STATE_IOS,
 201	STATE_TIMEOUT,
 202};
 203
 204enum sh_mmcif_wait_for {
 205	MMCIF_WAIT_FOR_REQUEST,
 206	MMCIF_WAIT_FOR_CMD,
 207	MMCIF_WAIT_FOR_MREAD,
 208	MMCIF_WAIT_FOR_MWRITE,
 209	MMCIF_WAIT_FOR_READ,
 210	MMCIF_WAIT_FOR_WRITE,
 211	MMCIF_WAIT_FOR_READ_END,
 212	MMCIF_WAIT_FOR_WRITE_END,
 213	MMCIF_WAIT_FOR_STOP,
 214};
 215
 216/*
 217 * difference for each SoC
 218 */
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum sh_mmcif_state state;
	enum sh_mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;
	u32 clkdiv_map;		/* see CE_CLK_CTRL::CLKDIV */

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

static const struct of_device_id sh_mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);
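
/*
 * For orientation, a minimal device-tree node matching this table might
 * look like the following sketch; the unit address, reg window and
 * bus-width below are made-up placeholders, not values from a real dtsi:
 *
 *	mmcif: mmc@e6bd0000 {
 *		compatible = "renesas,sh-mmcif";
 *		reg = <0xe6bd0000 0x100>;
 *		interrupts = <...>;
 *		bus-width = <8>;
 *	};
 */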

#define sh_mmcif_host_to_dev(host) (&host->pd->dev)

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}
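
/*
 * A usage sketch for the two helpers above (the register/bit pair is just
 * an example): unmasking the buffer-read-enable interrupt is done with
 *
 *	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
 *
 * i.e. a read-modify-write that ORs the bits in, while sh_mmcif_bitclr()
 * ANDs with the complement to clear them.
 */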

static void sh_mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(dev)))
		return;

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	if (slave_id <= 0)
		return NULL;

	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}

static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}
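
/*
 * What the configuration above amounts to (the base address below is
 * hypothetical): both directions point the DMA controller at the CE_DATA
 * FIFO, e.g.
 *
 *	cfg.src_addr = 0xe6bd0000 + MMCIF_CE_DATA;	(DMA_DEV_TO_MEM)
 *	cfg.dst_addr = 0xe6bd0000 + MMCIF_CE_DATA;	(DMA_MEM_TO_DEV)
 *
 * with 32-bit accesses in either direction.
 */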

static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
		struct sh_mmcif_plat_data *pdata = dev->platform_data;

		host->chan_tx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_tx);
		host->chan_rx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_rx);
	} else {
		host->chan_tx = dma_request_chan(dev, "tx");
		if (IS_ERR(host->chan_tx))
			host->chan_tx = NULL;
		host->chan_rx = dma_request_chan(dev, "rx");
		if (IS_ERR(host->chan_rx))
			host->chan_rx = NULL;
	}
	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
		host->chan_rx);

	if (!host->chan_tx || !host->chan_rx ||
	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
		goto error;

	return;

error:
	if (host->chan_tx)
		dma_release_channel(host->chan_tx);
	if (host->chan_rx)
		dma_release_channel(host->chan_rx);
	host->chan_tx = host->chan_rx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;
	unsigned int current_clk = clk_get_rate(host->clk);
	unsigned int clkdiv;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (host->clkdiv_map) {
		unsigned int freq, best_freq, myclk, div, diff_min, diff;
		int i;

		clkdiv = 0;
		diff_min = ~0;
		best_freq = 0;
		for (i = 31; i >= 0; i--) {
			if (!((1 << i) & host->clkdiv_map))
				continue;

			/*
			 * clk = parent_freq / div
			 * -> parent_freq = clk x div
			 */

			div = 1 << (i + 1);
			freq = clk_round_rate(host->clk, clk * div);
			myclk = freq / div;
			diff = (myclk > clk) ? myclk - clk : clk - myclk;

			if (diff <= diff_min) {
				best_freq = freq;
				clkdiv = i;
				diff_min = diff;
			}
		}

		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
			(best_freq >> (clkdiv + 1)), clk, best_freq, clkdiv);

		clk_set_rate(host->clk, best_freq);
		clkdiv = clkdiv << 16;
	} else if (sup_pclk && clk == current_clk) {
		clkdiv = CLK_SUP_PCLK;
	} else {
		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
	}

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
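
/*
 * Worked example for the final else-branch above (numbers illustrative):
 * with current_clk = 104 MHz and clk = 26 MHz,
 *
 *	DIV_ROUND_UP(104000000, 26000000) - 1 = 3
 *	fls(3) - 1 = 1
 *
 * so CLKDIV is programmed to 1; going by the div = 1 << (i + 1) relation
 * used in the clkdiv_map branch, that selects a divide-by-4 ratio and
 * yields exactly the requested 26 MHz.
 */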

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}
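
/*
 * Illustration with made-up numbers: for blocksize = 512 and an sg entry
 * of length 2048, sh_mmcif_next_block() advances sg_blkidx through
 * 512, 1024, 1536, 2048; on the last step it wraps to the next sg entry
 * (re-deriving pio_ptr via sg_virt()), and it returns false once sg_idx
 * reaches data->sg_len, ending the transfer.
 */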

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}
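
/*
 * Note on the reversed register order above: the MMC core expects
 * resp[0] to hold the most significant word of a 136-bit R2 response, so
 * CE_RESP3 presumably carries bits [127:96] and CE_RESP0 bits [31:0].
 */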

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R1B:
		tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(dev, "Unsupported response type.\n");
		break;
	}

	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * The MMC core will only set this timing if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, have to set it in their
			 * platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}
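
/*
 * Example of the word this builds (a sketch, not a register dump): a
 * single-block read, CMD17, with an R1 response on an 8-bit bus comes out
 * as roughly
 *
 *	(17 << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT | CMD_SET_DATW_8
 *
 * while CMD18/CMD25 additionally set CMD_SET_CMLTE and CMD_SET_CMD12EN so
 * that the controller issues the stop command itself.
 */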

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc;
	u32 mask = 0;
	unsigned long flags;

	if (cmd->flags & MMC_RSP_BUSY)
		mask = MASK_START_CMD | MASK_MRBSYE;
	else
		mask = MASK_START_CMD | MASK_MCRSPE;

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	if (host->mmc->f_max) {
		unsigned int f_max, f_min = 0, f_min_old;

		f_max = host->mmc->f_max;
		for (f_min_old = f_max; f_min_old > 2;) {
			f_min = clk_round_rate(host->clk, f_min_old / 2);
			if (f_min == f_min_old)
				break;
			f_min_old = f_min;
		}

		/*
		 * This driver assumes this SoC is R-Car Gen2 or later
		 */
		host->clkdiv_map = 0x3ff;

		host->mmc->f_max = f_max >> ffs(host->clkdiv_map);
		host->mmc->f_min = f_min >> fls(host->clkdiv_map);
	} else {
		unsigned int clk = clk_get_rate(host->clk);

		host->mmc->f_max = clk / 2;
		host->mmc->f_min = clk / 512;
	}

	dev_dbg(dev, "clk max/min = %d/%d\n",
		host->mmc->f_max, host->mmc->f_min);
}
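
/*
 * Numerically (illustrative values): with clkdiv_map = 0x3ff, ffs() is 1
 * and fls() is 10, so an f_max of 100 MHz is advertised as 50 MHz and
 * f_min is divided by 1024, covering dividers from /2 down to /1024.
 */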

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		if (!host->power) {
			clk_prepare_enable(host->clk);
			pm_runtime_get_sync(dev);
			sh_mmcif_sync_reset(host);
			sh_mmcif_request_dma(host);
			host->power = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (host->power) {
			sh_mmcif_clock_control(host, 0);
			sh_mmcif_release_dma(host);
			pm_runtime_put(dev);
			clk_disable_unprepare(host->clk);
			host->power = false;
		}
		break;
	case MMC_POWER_ON:
		sh_mmcif_clock_control(host, ios->clock);
		break;
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static const struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= mmc_gpio_get_cd,
};

static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	struct device *dev = sh_mmcif_host_to_dev(host);
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from the DMA callback and from an error
	 * IRQ, so it has to be reinitialised here, before .dma_active is set
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_sync(host->chan_rx);
		else
			dmaengine_terminate_sync(host->chan_tx);
	}

	return false;
}
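
/*
 * For reference, wait_for_completion_interruptible_timeout() returns > 0
 * (the remaining jiffies) on completion, 0 on timeout and a negative
 * errno if interrupted, which is why the code above distinguishes the
 * three cases through `time`.
 */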

static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	bool wait = false;
	unsigned long flags;
	int wait_work;

	spin_lock_irqsave(&host->lock, flags);
	wait_work = host->wait_for;
	spin_unlock_irqrestore(&host->lock, flags);

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true if processing continues, and false if the
	 * request has to be completed - successfully or not
	 */
	switch (wait_work) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}

static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			sh_mmcif_dma_complete(host);
	} else {
		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}
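
/*
 * The returns above follow the usual hard-IRQ/threaded-IRQ split:
 * IRQ_WAKE_THREAD defers the sleeping work (PIO copies, DMA waits,
 * mmc_request_done()) to sh_mmcif_irqt(), while in the DMA-active case
 * the thread is already blocked on dma_complete, so an error is forwarded
 * by completing it here and returning IRQ_HANDLED.
 */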

static void sh_mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct device *dev = &pdev->dev;
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq_optional(pdev, 1);
	if (irq[0] < 0)
		return irq[0];

	reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= msecs_to_jiffies(10000);
	host->ccs_enable = true;
	host->clk_ctrl2_enable = false;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
	mmc->max_busy_timeout = 10000;

	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(dev, "cannot get clock: %d\n", ret);
		goto err_host;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret < 0)
		goto err_host;

	sh_mmcif_clk_setup(host);

	pm_runtime_enable(dev);
	host->power = false;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(dev, 100);

	dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->clk) / 1000000UL);

	pm_runtime_put(dev);
	clk_disable_unprepare(host->clk);
	return ret;

err_clk:
	clk_disable_unprepare(host->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}

static void sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->clk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	pm_runtime_get_sync(dev);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
	pm_runtime_put(dev);

	return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove_new	= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= &sh_mmcif_dev_pm_ops,
		.of_match_table = sh_mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");