   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Synopsys DesignWare Multimedia Card Interface driver
   4 *  (Based on NXP driver for lpc 31xx)
   5 *
   6 * Copyright (C) 2009 NXP Semiconductors
   7 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
   8 */
   9
  10#include <linux/blkdev.h>
  11#include <linux/clk.h>
  12#include <linux/debugfs.h>
  13#include <linux/device.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/err.h>
  16#include <linux/init.h>
  17#include <linux/interrupt.h>
  18#include <linux/iopoll.h>
  19#include <linux/ioport.h>
  20#include <linux/ktime.h>
  21#include <linux/module.h>
  22#include <linux/platform_device.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/prandom.h>
  25#include <linux/seq_file.h>
  26#include <linux/slab.h>
  27#include <linux/stat.h>
  28#include <linux/delay.h>
  29#include <linux/irq.h>
  30#include <linux/mmc/card.h>
  31#include <linux/mmc/host.h>
  32#include <linux/mmc/mmc.h>
  33#include <linux/mmc/sd.h>
  34#include <linux/mmc/sdio.h>
  35#include <linux/bitops.h>
  36#include <linux/regulator/consumer.h>
  37#include <linux/of.h>
  38#include <linux/of_gpio.h>
  39#include <linux/mmc/slot-gpio.h>
  40
  41#include "dw_mmc.h"
  42
  43/* Common flag combinations */
  44#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
  45				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
  46				 SDMMC_INT_EBE | SDMMC_INT_HLE)
  47#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
  48				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
  49#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
  50				 DW_MCI_CMD_ERROR_FLAGS)
  51#define DW_MCI_SEND_STATUS	1
  52#define DW_MCI_RECV_STATUS	2
  53#define DW_MCI_DMA_THRESHOLD	16
  54
  55#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
  56#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */
  57
  58#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
  59				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
  60				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
  61				 SDMMC_IDMAC_INT_TI)
  62
  63#define DESC_RING_BUF_SZ	PAGE_SIZE
  64
  65struct idmac_desc_64addr {
  66	u32		des0;	/* Control Descriptor */
  67#define IDMAC_OWN_CLR64(x) \
  68	!((x) & cpu_to_le32(IDMAC_DES0_OWN))
  69
  70	u32		des1;	/* Reserved */
  71
   72	u32		des2;	/* Buffer sizes */
  73#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
  74	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
  75	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
  76
  77	u32		des3;	/* Reserved */
  78
   79	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
   80	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */
  81
  82	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
  83	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
  84};
  85
  86struct idmac_desc {
  87	__le32		des0;	/* Control Descriptor */
  88#define IDMAC_DES0_DIC	BIT(1)
  89#define IDMAC_DES0_LD	BIT(2)
  90#define IDMAC_DES0_FD	BIT(3)
  91#define IDMAC_DES0_CH	BIT(4)
  92#define IDMAC_DES0_ER	BIT(5)
  93#define IDMAC_DES0_CES	BIT(30)
  94#define IDMAC_DES0_OWN	BIT(31)
  95
  96	__le32		des1;	/* Buffer sizes */
  97#define IDMAC_SET_BUFFER1_SIZE(d, s) \
  98	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
  99
 100	__le32		des2;	/* buffer 1 physical address */
 101
 102	__le32		des3;	/* buffer 2 physical address */
 103};
 104
 105/* Each descriptor can transfer up to 4KB of data in chained mode */
 106#define DW_MCI_DESC_DATA_LENGTH	0x1000
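/*
 * A worked sizing example for the above (illustrative, assuming a
 * 4 KiB PAGE_SIZE): sizeof(struct idmac_desc) is 16 bytes, so one
 * DESC_RING_BUF_SZ ring holds 4096 / 16 = 256 descriptors (128 for
 * the 32-byte idmac_desc_64addr).  IDMAC_SET_BUFFER1_SIZE() packs
 * the size into des1 bits [12:0] (max 0x1fff), but each descriptor
 * is capped at DW_MCI_DESC_DATA_LENGTH (0x1000), so a full 32-bit
 * ring covers at most 256 * 4 KiB = 1 MiB per transfer.
 */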
 107
 108#if defined(CONFIG_DEBUG_FS)
 109static int dw_mci_req_show(struct seq_file *s, void *v)
 110{
 111	struct dw_mci_slot *slot = s->private;
 112	struct mmc_request *mrq;
 113	struct mmc_command *cmd;
 114	struct mmc_command *stop;
 115	struct mmc_data	*data;
 116
 117	/* Make sure we get a consistent snapshot */
 118	spin_lock_bh(&slot->host->lock);
 119	mrq = slot->mrq;
 120
 121	if (mrq) {
 122		cmd = mrq->cmd;
 123		data = mrq->data;
 124		stop = mrq->stop;
 125
 126		if (cmd)
 127			seq_printf(s,
 128				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 129				   cmd->opcode, cmd->arg, cmd->flags,
 130				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
  131				   cmd->resp[3], cmd->error);
 132		if (data)
 133			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
 134				   data->bytes_xfered, data->blocks,
 135				   data->blksz, data->flags, data->error);
 136		if (stop)
 137			seq_printf(s,
 138				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 139				   stop->opcode, stop->arg, stop->flags,
 140				   stop->resp[0], stop->resp[1], stop->resp[2],
  141				   stop->resp[3], stop->error);
 142	}
 143
 144	spin_unlock_bh(&slot->host->lock);
 145
 146	return 0;
 147}
 148DEFINE_SHOW_ATTRIBUTE(dw_mci_req);
 149
 150static int dw_mci_regs_show(struct seq_file *s, void *v)
 151{
 152	struct dw_mci *host = s->private;
 153
 154	pm_runtime_get_sync(host->dev);
 155
 156	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
 157	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
 158	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
 159	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
 160	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
 161	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
 162
 163	pm_runtime_put_autosuspend(host->dev);
 164
 165	return 0;
 166}
 167DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);
 168
 169static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
 170{
 171	struct mmc_host	*mmc = slot->mmc;
 172	struct dw_mci *host = slot->host;
 173	struct dentry *root;
 174
 175	root = mmc->debugfs_root;
 176	if (!root)
 177		return;
 178
 179	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
 180	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
 181	debugfs_create_u32("state", S_IRUSR, root, &host->state);
 182	debugfs_create_xul("pending_events", S_IRUSR, root,
 183			   &host->pending_events);
 184	debugfs_create_xul("completed_events", S_IRUSR, root,
 185			   &host->completed_events);
 186#ifdef CONFIG_FAULT_INJECTION
 187	fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
 188#endif
 189}
 190#endif /* defined(CONFIG_DEBUG_FS) */
 191
 192static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
 193{
 194	u32 ctrl;
 195
 196	ctrl = mci_readl(host, CTRL);
 197	ctrl |= reset;
 198	mci_writel(host, CTRL, ctrl);
 199
 200	/* wait till resets clear */
 201	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
 202				      !(ctrl & reset),
 203				      1, 500 * USEC_PER_MSEC)) {
 204		dev_err(host->dev,
 205			"Timeout resetting block (ctrl reset %#x)\n",
 206			ctrl & reset);
 207		return false;
 208	}
 209
 210	return true;
 211}
 212
 213static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
 214{
 215	u32 status;
 216
 217	/*
 218	 * Databook says that before issuing a new data transfer command
 219	 * we need to check to see if the card is busy.  Data transfer commands
 220	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
 221	 *
 222	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
 223	 * expected.
 224	 */
 225	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
 226	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
 227		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
 228					      status,
 229					      !(status & SDMMC_STATUS_BUSY),
 230					      10, 500 * USEC_PER_MSEC))
 231			dev_err(host->dev, "Busy; trying anyway\n");
 232	}
 233}
 234
 235static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
 236{
 237	struct dw_mci *host = slot->host;
 238	unsigned int cmd_status = 0;
 239
 240	mci_writel(host, CMDARG, arg);
 241	wmb(); /* drain writebuffer */
 242	dw_mci_wait_while_busy(host, cmd);
 243	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
 244
 245	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
 246				      !(cmd_status & SDMMC_CMD_START),
 247				      1, 500 * USEC_PER_MSEC))
 248		dev_err(&slot->mmc->class_dev,
 249			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
 250			cmd, arg, cmd_status);
 251}
 252
 253static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
 254{
 255	struct dw_mci_slot *slot = mmc_priv(mmc);
 256	struct dw_mci *host = slot->host;
 257	u32 cmdr;
 258
 259	cmd->error = -EINPROGRESS;
 260	cmdr = cmd->opcode;
 261
 262	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
 263	    cmd->opcode == MMC_GO_IDLE_STATE ||
 264	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
 265	    (cmd->opcode == SD_IO_RW_DIRECT &&
 266	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
 267		cmdr |= SDMMC_CMD_STOP;
 268	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
 269		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
 270
 271	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
 272		u32 clk_en_a;
 273
 274		/* Special bit makes CMD11 not die */
 275		cmdr |= SDMMC_CMD_VOLT_SWITCH;
 276
 277		/* Change state to continue to handle CMD11 weirdness */
 278		WARN_ON(slot->host->state != STATE_SENDING_CMD);
 279		slot->host->state = STATE_SENDING_CMD11;
 280
 281		/*
 282		 * We need to disable low power mode (automatic clock stop)
 283		 * while doing voltage switch so we don't confuse the card,
 284		 * since stopping the clock is a specific part of the UHS
 285		 * voltage change dance.
 286		 *
 287		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
 288		 * unconditionally turned back on in dw_mci_setup_bus() if it's
 289		 * ever called with a non-zero clock.  That shouldn't happen
 290		 * until the voltage change is all done.
 291		 */
 292		clk_en_a = mci_readl(host, CLKENA);
 293		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
 294		mci_writel(host, CLKENA, clk_en_a);
 295		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
 296			     SDMMC_CMD_PRV_DAT_WAIT, 0);
 297	}
 298
 299	if (cmd->flags & MMC_RSP_PRESENT) {
 300		/* We expect a response, so set this bit */
 301		cmdr |= SDMMC_CMD_RESP_EXP;
 302		if (cmd->flags & MMC_RSP_136)
 303			cmdr |= SDMMC_CMD_RESP_LONG;
 304	}
 305
 306	if (cmd->flags & MMC_RSP_CRC)
 307		cmdr |= SDMMC_CMD_RESP_CRC;
 308
 309	if (cmd->data) {
 310		cmdr |= SDMMC_CMD_DAT_EXP;
 311		if (cmd->data->flags & MMC_DATA_WRITE)
 312			cmdr |= SDMMC_CMD_DAT_WR;
 313	}
 314
 315	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
 316		cmdr |= SDMMC_CMD_USE_HOLD_REG;
 317
 318	return cmdr;
 319}
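/*
 * Illustrative encoding following the logic above (an example, not a
 * register dump): CMD17 (MMC_READ_SINGLE_BLOCK, R1 response with
 * CRC, read data) is prepared as
 *
 *	cmdr = 17 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
 *	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP |
 *	       SDMMC_CMD_USE_HOLD_REG;   (unless NO_USE_HOLD is set)
 *
 * whereas CMD12 (MMC_STOP_TRANSMISSION) gets SDMMC_CMD_STOP instead
 * of SDMMC_CMD_PRV_DAT_WAIT.
 */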
 320
 321static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
 322{
 323	struct mmc_command *stop;
 324	u32 cmdr;
 325
 326	if (!cmd->data)
 327		return 0;
 328
 329	stop = &host->stop_abort;
 330	cmdr = cmd->opcode;
 331	memset(stop, 0, sizeof(struct mmc_command));
 332
 333	if (cmdr == MMC_READ_SINGLE_BLOCK ||
 334	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
 335	    cmdr == MMC_WRITE_BLOCK ||
 336	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
 337	    mmc_op_tuning(cmdr) ||
 338	    cmdr == MMC_GEN_CMD) {
 339		stop->opcode = MMC_STOP_TRANSMISSION;
 340		stop->arg = 0;
 341		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
 342	} else if (cmdr == SD_IO_RW_EXTENDED) {
 343		stop->opcode = SD_IO_RW_DIRECT;
 344		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
 345			     ((cmd->arg >> 28) & 0x7);
 346		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
 347	} else {
 348		return 0;
 349	}
 350
 351	cmdr = stop->opcode | SDMMC_CMD_STOP |
 352		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
 353
 354	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
 355		cmdr |= SDMMC_CMD_USE_HOLD_REG;
 356
 357	return cmdr;
 358}
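/*
 * How the SDIO abort argument above is packed (illustrative): when
 * aborting a CMD53 addressed to function 2, cmd->arg carries the
 * function number in bits [30:28], so the generated CMD52 uses
 *
 *	arg = (1 << 31)               write
 *	    | (0 << 28)               via function 0 (the CCCR)
 *	    | (SDIO_CCCR_ABORT << 9)  register address 0x06
 *	    | 0x2;                    abort function 2
 */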
 359
 360static inline void dw_mci_set_cto(struct dw_mci *host)
 361{
 362	unsigned int cto_clks;
 363	unsigned int cto_div;
 364	unsigned int cto_ms;
 365	unsigned long irqflags;
 366
 367	cto_clks = mci_readl(host, TMOUT) & 0xff;
 368	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
 369	if (cto_div == 0)
 370		cto_div = 1;
 371
 372	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
 373				  host->bus_hz);
 374
  375	/* add a bit of spare time */
 376	cto_ms += 10;
 377
 378	/*
 379	 * The durations we're working with are fairly short so we have to be
 380	 * extra careful about synchronization here.  Specifically in hardware a
 381	 * command timeout is _at most_ 5.1 ms, so that means we expect an
 382	 * interrupt (either command done or timeout) to come rather quickly
 383	 * after the mci_writel.  ...but just in case we have a long interrupt
 384	 * latency let's add a bit of paranoia.
 385	 *
 386	 * In general we'll assume that at least an interrupt will be asserted
 387	 * in hardware by the time the cto_timer runs.  ...and if it hasn't
 388	 * been asserted in hardware by that time then we'll assume it'll never
 389	 * come.
 390	 */
 391	spin_lock_irqsave(&host->irq_lock, irqflags);
 392	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
 393		mod_timer(&host->cto_timer,
 394			jiffies + msecs_to_jiffies(cto_ms) + 1);
 395	spin_unlock_irqrestore(&host->irq_lock, irqflags);
 396}
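/*
 * Worked example of the timeout math above (illustrative numbers):
 * with bus_hz = 100 MHz, cto_clks = 255 (TMOUT[7:0] = 0xff) and
 * CLKDIV = 1 (so cto_div = 2),
 *
 *	cto_ms = DIV_ROUND_UP(1000 * 255 * 2, 100000000) = 1
 *
 * and the 10 ms of spare time gives an 11 ms software timer for a
 * hardware timeout of only 5.1 us -- the margin the comment above
 * calls paranoia.
 */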
 397
 398static void dw_mci_start_command(struct dw_mci *host,
 399				 struct mmc_command *cmd, u32 cmd_flags)
 400{
 401	host->cmd = cmd;
 402	dev_vdbg(host->dev,
 403		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
 404		 cmd->arg, cmd_flags);
 405
 406	mci_writel(host, CMDARG, cmd->arg);
 407	wmb(); /* drain writebuffer */
 408	dw_mci_wait_while_busy(host, cmd_flags);
 409
 410	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
 411
 412	/* response expected command only */
 413	if (cmd_flags & SDMMC_CMD_RESP_EXP)
 414		dw_mci_set_cto(host);
 415}
 416
 417static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
 418{
 419	struct mmc_command *stop = &host->stop_abort;
 420
 421	dw_mci_start_command(host, stop, host->stop_cmdr);
 422}
 423
 424/* DMA interface functions */
 425static void dw_mci_stop_dma(struct dw_mci *host)
 426{
 427	if (host->using_dma) {
 428		host->dma_ops->stop(host);
 429		host->dma_ops->cleanup(host);
 430	}
 431
 432	/* Data transfer was stopped by the interrupt handler */
 433	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 434}
 435
 436static void dw_mci_dma_cleanup(struct dw_mci *host)
 437{
 438	struct mmc_data *data = host->data;
 439
 440	if (data && data->host_cookie == COOKIE_MAPPED) {
 441		dma_unmap_sg(host->dev,
 442			     data->sg,
 443			     data->sg_len,
 444			     mmc_get_dma_dir(data));
 445		data->host_cookie = COOKIE_UNMAPPED;
 446	}
 447}
 448
 449static void dw_mci_idmac_reset(struct dw_mci *host)
 450{
 451	u32 bmod = mci_readl(host, BMOD);
 452	/* Software reset of DMA */
 453	bmod |= SDMMC_IDMAC_SWRESET;
 454	mci_writel(host, BMOD, bmod);
 455}
 456
 457static void dw_mci_idmac_stop_dma(struct dw_mci *host)
 458{
 459	u32 temp;
 460
 461	/* Disable and reset the IDMAC interface */
 462	temp = mci_readl(host, CTRL);
 463	temp &= ~SDMMC_CTRL_USE_IDMAC;
 464	temp |= SDMMC_CTRL_DMA_RESET;
 465	mci_writel(host, CTRL, temp);
 466
 467	/* Stop the IDMAC running */
 468	temp = mci_readl(host, BMOD);
 469	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
 470	temp |= SDMMC_IDMAC_SWRESET;
 471	mci_writel(host, BMOD, temp);
 472}
 473
 474static void dw_mci_dmac_complete_dma(void *arg)
 475{
 476	struct dw_mci *host = arg;
 477	struct mmc_data *data = host->data;
 478
 479	dev_vdbg(host->dev, "DMA complete\n");
 480
 481	if ((host->use_dma == TRANS_MODE_EDMAC) &&
 482	    data && (data->flags & MMC_DATA_READ))
 483		/* Invalidate cache after read */
 484		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
 485				    data->sg,
 486				    data->sg_len,
 487				    DMA_FROM_DEVICE);
 488
 489	host->dma_ops->cleanup(host);
 490
 491	/*
 492	 * If the card was removed, data will be NULL. No point in trying to
 493	 * send the stop command or waiting for NBUSY in this case.
 494	 */
 495	if (data) {
 496		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 497		tasklet_schedule(&host->tasklet);
 498	}
 499}
 500
 501static int dw_mci_idmac_init(struct dw_mci *host)
 502{
 503	int i;
 504
 505	if (host->dma_64bit_address == 1) {
 506		struct idmac_desc_64addr *p;
 507		/* Number of descriptors in the ring buffer */
 508		host->ring_size =
 509			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
 510
 511		/* Forward link the descriptor list */
 512		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
 513								i++, p++) {
 514			p->des6 = (host->sg_dma +
 515					(sizeof(struct idmac_desc_64addr) *
 516							(i + 1))) & 0xffffffff;
 517
 518			p->des7 = (u64)(host->sg_dma +
 519					(sizeof(struct idmac_desc_64addr) *
 520							(i + 1))) >> 32;
 521			/* Initialize reserved and buffer size fields to "0" */
 522			p->des0 = 0;
 523			p->des1 = 0;
 524			p->des2 = 0;
 525			p->des3 = 0;
 526		}
 527
 528		/* Set the last descriptor as the end-of-ring descriptor */
 529		p->des6 = host->sg_dma & 0xffffffff;
 530		p->des7 = (u64)host->sg_dma >> 32;
 531		p->des0 = IDMAC_DES0_ER;
 532
 533	} else {
 534		struct idmac_desc *p;
 535		/* Number of descriptors in the ring buffer */
 536		host->ring_size =
 537			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
 538
 539		/* Forward link the descriptor list */
 540		for (i = 0, p = host->sg_cpu;
 541		     i < host->ring_size - 1;
 542		     i++, p++) {
 543			p->des3 = cpu_to_le32(host->sg_dma +
 544					(sizeof(struct idmac_desc) * (i + 1)));
 545			p->des0 = 0;
 546			p->des1 = 0;
 547		}
 548
 549		/* Set the last descriptor as the end-of-ring descriptor */
 550		p->des3 = cpu_to_le32(host->sg_dma);
 551		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
 552	}
 553
 554	dw_mci_idmac_reset(host);
 555
 556	if (host->dma_64bit_address == 1) {
 557		/* Mask out interrupts - get Tx & Rx complete only */
 558		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
 559		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
 560				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
 561
 562		/* Set the descriptor base address */
 563		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
 564		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
 565
 566	} else {
 567		/* Mask out interrupts - get Tx & Rx complete only */
 568		mci_writel(host, IDSTS, IDMAC_INT_CLR);
 569		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
 570				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
 571
 572		/* Set the descriptor base address */
 573		mci_writel(host, DBADDR, host->sg_dma);
 574	}
 575
 576	return 0;
 577}
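/*
 * The loops above link descriptor i's next-descriptor field (des3,
 * or des6/des7 in the 64-bit layout) to the DMA address of
 * descriptor i + 1, and the last descriptor points back at sg_dma
 * with IDMAC_DES0_ER set, making the ring circular.  Sketch for the
 * 32-bit case (illustrative, 16-byte descriptors):
 *
 *	desc[0].des3 = sg_dma + 16;
 *	desc[1].des3 = sg_dma + 32;
 *	...
 *	desc[ring_size - 1].des3 = sg_dma;	(wrap to the start)
 */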
 578
 579static inline int dw_mci_prepare_desc64(struct dw_mci *host,
 580					 struct mmc_data *data,
 581					 unsigned int sg_len)
 582{
 583	unsigned int desc_len;
 584	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
 585	u32 val;
 586	int i;
 587
 588	desc_first = desc_last = desc = host->sg_cpu;
 589
 590	for (i = 0; i < sg_len; i++) {
 591		unsigned int length = sg_dma_len(&data->sg[i]);
 592
 593		u64 mem_addr = sg_dma_address(&data->sg[i]);
 594
 595		for ( ; length ; desc++) {
 596			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
 597				   length : DW_MCI_DESC_DATA_LENGTH;
 598
 599			length -= desc_len;
 600
  601			/*
  602			 * Wait for the IDMAC's previous clear of the OWN
  603			 * bit to complete, to make sure this descriptor is
  604			 * no longer owned by the IDMAC, since the IDMAC's
  605			 * write ops and the CPU's read ops are asynchronous.
  606			 */
 607			if (readl_poll_timeout_atomic(&desc->des0, val,
 608						!(val & IDMAC_DES0_OWN),
 609						10, 100 * USEC_PER_MSEC))
 610				goto err_own_bit;
 611
 612			/*
 613			 * Set the OWN bit and disable interrupts
 614			 * for this descriptor
 615			 */
 616			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
 617						IDMAC_DES0_CH;
 618
 619			/* Buffer length */
 620			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
 621
 622			/* Physical address to DMA to/from */
 623			desc->des4 = mem_addr & 0xffffffff;
 624			desc->des5 = mem_addr >> 32;
 625
 626			/* Update physical address for the next desc */
 627			mem_addr += desc_len;
 628
 629			/* Save pointer to the last descriptor */
 630			desc_last = desc;
 631		}
 632	}
 633
 634	/* Set first descriptor */
 635	desc_first->des0 |= IDMAC_DES0_FD;
 636
 637	/* Set last descriptor */
 638	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
 639	desc_last->des0 |= IDMAC_DES0_LD;
 640
 641	return 0;
 642err_own_bit:
  643	/* restore the descriptor chain, since it has been corrupted */
 644	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
 645	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
 646	dw_mci_idmac_init(host);
 647	return -EINVAL;
 648}
 649
 650
 651static inline int dw_mci_prepare_desc32(struct dw_mci *host,
 652					 struct mmc_data *data,
 653					 unsigned int sg_len)
 654{
 655	unsigned int desc_len;
 656	struct idmac_desc *desc_first, *desc_last, *desc;
 657	u32 val;
 658	int i;
 659
 660	desc_first = desc_last = desc = host->sg_cpu;
 661
 662	for (i = 0; i < sg_len; i++) {
 663		unsigned int length = sg_dma_len(&data->sg[i]);
 664
 665		u32 mem_addr = sg_dma_address(&data->sg[i]);
 666
 667		for ( ; length ; desc++) {
 668			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
 669				   length : DW_MCI_DESC_DATA_LENGTH;
 670
 671			length -= desc_len;
 672
  673			/*
  674			 * Wait for the IDMAC's previous clear of the OWN
  675			 * bit to complete, to make sure this descriptor is
  676			 * no longer owned by the IDMAC, since the IDMAC's
  677			 * write ops and the CPU's read ops are asynchronous.
  678			 */
 679			if (readl_poll_timeout_atomic(&desc->des0, val,
 680						      IDMAC_OWN_CLR64(val),
 681						      10,
 682						      100 * USEC_PER_MSEC))
 683				goto err_own_bit;
 684
 685			/*
 686			 * Set the OWN bit and disable interrupts
 687			 * for this descriptor
 688			 */
 689			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
 690						 IDMAC_DES0_DIC |
 691						 IDMAC_DES0_CH);
 692
 693			/* Buffer length */
 694			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
 695
 696			/* Physical address to DMA to/from */
 697			desc->des2 = cpu_to_le32(mem_addr);
 698
 699			/* Update physical address for the next desc */
 700			mem_addr += desc_len;
 701
 702			/* Save pointer to the last descriptor */
 703			desc_last = desc;
 704		}
 705	}
 706
 707	/* Set first descriptor */
 708	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
 709
 710	/* Set last descriptor */
 711	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
 712				       IDMAC_DES0_DIC));
 713	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
 714
 715	return 0;
 716err_own_bit:
  717	/* restore the descriptor chain, since it has been corrupted */
 718	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
 719	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
 720	dw_mci_idmac_init(host);
 721	return -EINVAL;
 722}
 723
 724static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 725{
 726	u32 temp;
 727	int ret;
 728
 729	if (host->dma_64bit_address == 1)
 730		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
 731	else
 732		ret = dw_mci_prepare_desc32(host, host->data, sg_len);
 733
 734	if (ret)
 735		goto out;
 736
 737	/* drain writebuffer */
 738	wmb();
 739
 740	/* Make sure to reset DMA in case we did PIO before this */
 741	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
 742	dw_mci_idmac_reset(host);
 743
 744	/* Select IDMAC interface */
 745	temp = mci_readl(host, CTRL);
 746	temp |= SDMMC_CTRL_USE_IDMAC;
 747	mci_writel(host, CTRL, temp);
 748
 749	/* drain writebuffer */
 750	wmb();
 751
 752	/* Enable the IDMAC */
 753	temp = mci_readl(host, BMOD);
 754	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
 755	mci_writel(host, BMOD, temp);
 756
 757	/* Start it running */
 758	mci_writel(host, PLDMND, 1);
 759
 760out:
 761	return ret;
 762}
 763
 764static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
 765	.init = dw_mci_idmac_init,
 766	.start = dw_mci_idmac_start_dma,
 767	.stop = dw_mci_idmac_stop_dma,
 768	.complete = dw_mci_dmac_complete_dma,
 769	.cleanup = dw_mci_dma_cleanup,
 770};
 771
 772static void dw_mci_edmac_stop_dma(struct dw_mci *host)
 773{
 774	dmaengine_terminate_async(host->dms->ch);
 775}
 776
 777static int dw_mci_edmac_start_dma(struct dw_mci *host,
 778					    unsigned int sg_len)
 779{
 780	struct dma_slave_config cfg;
 781	struct dma_async_tx_descriptor *desc = NULL;
 782	struct scatterlist *sgl = host->data->sg;
 783	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
 784	u32 sg_elems = host->data->sg_len;
 785	u32 fifoth_val;
 786	u32 fifo_offset = host->fifo_reg - host->regs;
 787	int ret = 0;
 788
 789	/* Set external dma config: burst size, burst width */
 790	memset(&cfg, 0, sizeof(cfg));
 791	cfg.dst_addr = host->phy_regs + fifo_offset;
 792	cfg.src_addr = cfg.dst_addr;
 793	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 794	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 795
 796	/* Match burst msize with external dma config */
 797	fifoth_val = mci_readl(host, FIFOTH);
 798	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
 799	cfg.src_maxburst = cfg.dst_maxburst;
 800
 801	if (host->data->flags & MMC_DATA_WRITE)
 802		cfg.direction = DMA_MEM_TO_DEV;
 803	else
 804		cfg.direction = DMA_DEV_TO_MEM;
 805
 806	ret = dmaengine_slave_config(host->dms->ch, &cfg);
 807	if (ret) {
 808		dev_err(host->dev, "Failed to config edmac.\n");
 809		return -EBUSY;
 810	}
 811
 812	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
 813				       sg_len, cfg.direction,
 814				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 815	if (!desc) {
 816		dev_err(host->dev, "Can't prepare slave sg.\n");
 817		return -EBUSY;
 818	}
 819
 820	/* Set dw_mci_dmac_complete_dma as callback */
 821	desc->callback = dw_mci_dmac_complete_dma;
 822	desc->callback_param = (void *)host;
 823	dmaengine_submit(desc);
 824
 825	/* Flush cache before write */
 826	if (host->data->flags & MMC_DATA_WRITE)
 827		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
 828				       sg_elems, DMA_TO_DEVICE);
 829
 830	dma_async_issue_pending(host->dms->ch);
 831
 832	return 0;
 833}
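/*
 * The burst-size match above decodes FIFOTH the same way
 * dw_mci_adjust_fifoth() encodes it: bits [30:28] index mszs[].
 * Illustrative example: if FIFOTH reads back 0x20070008, then
 * (0x20070008 >> 28) & 0x7 = 2, mszs[2] = 8, and the external DMA
 * channel is configured for 8-transfer bursts.
 */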
 834
 835static int dw_mci_edmac_init(struct dw_mci *host)
 836{
 837	/* Request external dma channel */
 838	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
 839	if (!host->dms)
 840		return -ENOMEM;
 841
 842	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
 843	if (IS_ERR(host->dms->ch)) {
 844		int ret = PTR_ERR(host->dms->ch);
 845
 846		dev_err(host->dev, "Failed to get external DMA channel.\n");
 847		kfree(host->dms);
 848		host->dms = NULL;
 849		return ret;
 850	}
 851
 852	return 0;
 853}
 854
 855static void dw_mci_edmac_exit(struct dw_mci *host)
 856{
 857	if (host->dms) {
 858		if (host->dms->ch) {
 859			dma_release_channel(host->dms->ch);
 860			host->dms->ch = NULL;
 861		}
 862		kfree(host->dms);
 863		host->dms = NULL;
 864	}
 865}
 866
 867static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
 868	.init = dw_mci_edmac_init,
 869	.exit = dw_mci_edmac_exit,
 870	.start = dw_mci_edmac_start_dma,
 871	.stop = dw_mci_edmac_stop_dma,
 872	.complete = dw_mci_dmac_complete_dma,
 873	.cleanup = dw_mci_dma_cleanup,
 874};
 875
 876static int dw_mci_pre_dma_transfer(struct dw_mci *host,
 877				   struct mmc_data *data,
 878				   int cookie)
 879{
 880	struct scatterlist *sg;
 881	unsigned int i, sg_len;
 882
 883	if (data->host_cookie == COOKIE_PRE_MAPPED)
 884		return data->sg_len;
 885
 886	/*
 887	 * We don't do DMA on "complex" transfers, i.e. with
 888	 * non-word-aligned buffers or lengths. Also, we don't bother
 889	 * with all the DMA setup overhead for short transfers.
 890	 */
 891	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
 892		return -EINVAL;
 893
 894	if (data->blksz & 3)
 895		return -EINVAL;
 896
 897	for_each_sg(data->sg, sg, data->sg_len, i) {
 898		if (sg->offset & 3 || sg->length & 3)
 899			return -EINVAL;
 900	}
 901
 902	sg_len = dma_map_sg(host->dev,
 903			    data->sg,
 904			    data->sg_len,
 905			    mmc_get_dma_dir(data));
 906	if (sg_len == 0)
 907		return -EINVAL;
 908
 909	data->host_cookie = cookie;
 910
 911	return sg_len;
 912}
 913
 914static void dw_mci_pre_req(struct mmc_host *mmc,
 915			   struct mmc_request *mrq)
 916{
 917	struct dw_mci_slot *slot = mmc_priv(mmc);
 918	struct mmc_data *data = mrq->data;
 919
 920	if (!slot->host->use_dma || !data)
 921		return;
 922
 923	/* This data might be unmapped at this time */
 924	data->host_cookie = COOKIE_UNMAPPED;
 925
 926	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
 927				COOKIE_PRE_MAPPED) < 0)
 928		data->host_cookie = COOKIE_UNMAPPED;
 929}
 930
 931static void dw_mci_post_req(struct mmc_host *mmc,
 932			    struct mmc_request *mrq,
 933			    int err)
 934{
 935	struct dw_mci_slot *slot = mmc_priv(mmc);
 936	struct mmc_data *data = mrq->data;
 937
 938	if (!slot->host->use_dma || !data)
 939		return;
 940
 941	if (data->host_cookie != COOKIE_UNMAPPED)
 942		dma_unmap_sg(slot->host->dev,
 943			     data->sg,
 944			     data->sg_len,
 945			     mmc_get_dma_dir(data));
 946	data->host_cookie = COOKIE_UNMAPPED;
 947}
 948
 949static int dw_mci_get_cd(struct mmc_host *mmc)
 950{
 951	int present;
 952	struct dw_mci_slot *slot = mmc_priv(mmc);
 953	struct dw_mci *host = slot->host;
 954	int gpio_cd = mmc_gpio_get_cd(mmc);
 955
 956	/* Use platform get_cd function, else try onboard card detect */
 957	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
 958				|| !mmc_card_is_removable(mmc))) {
 959		present = 1;
 960
 961		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
 962			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
 963				dev_info(&mmc->class_dev,
 964					"card is polling.\n");
 965			} else {
 966				dev_info(&mmc->class_dev,
 967					"card is non-removable.\n");
 968			}
 969			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
 970		}
 971
 972		return present;
 973	} else if (gpio_cd >= 0)
 974		present = gpio_cd;
 975	else
 976		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
 977			== 0 ? 1 : 0;
 978
 979	spin_lock_bh(&host->lock);
 980	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
 981		dev_dbg(&mmc->class_dev, "card is present\n");
 982	else if (!present &&
 983			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
 984		dev_dbg(&mmc->class_dev, "card is not present\n");
 985	spin_unlock_bh(&host->lock);
 986
 987	return present;
 988}
 989
 990static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
 991{
 992	unsigned int blksz = data->blksz;
 993	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
 994	u32 fifo_width = 1 << host->data_shift;
 995	u32 blksz_depth = blksz / fifo_width, fifoth_val;
 996	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
 997	int idx = ARRAY_SIZE(mszs) - 1;
 998
 999	/* PIO should skip this scenario */
1000	if (!host->use_dma)
1001		return;
1002
1003	tx_wmark = (host->fifo_depth) / 2;
1004	tx_wmark_invers = host->fifo_depth - tx_wmark;
1005
1006	/*
1007	 * MSIZE is '1',
1008	 * if blksz is not a multiple of the FIFO width
1009	 */
1010	if (blksz % fifo_width)
1011		goto done;
1012
1013	do {
1014		if (!((blksz_depth % mszs[idx]) ||
1015		     (tx_wmark_invers % mszs[idx]))) {
1016			msize = idx;
1017			rx_wmark = mszs[idx] - 1;
1018			break;
1019		}
1020	} while (--idx > 0);
 1021	/*
 1022	 * If idx reaches '0', no burst size matched;
 1023	 * thus, the initial values are used.
 1024	 */
1025done:
1026	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
1027	mci_writel(host, FIFOTH, fifoth_val);
1028}
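/*
 * Worked example of the search above (illustrative, assuming a
 * 64-deep FIFO with a 32-bit data width, i.e. fifo_width = 4): for
 * blksz = 512, blksz_depth = 128, tx_wmark = 32 and
 * tx_wmark_invers = 32.  Scanning mszs[] from the top, 32 is the
 * largest entry dividing both 128 and 32, so msize = 4 (the index
 * of 32) and rx_wmark = 31, giving FIFOTH = SDMMC_SET_FIFOTH(4, 31, 32).
 */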
1029
1030static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
1031{
1032	unsigned int blksz = data->blksz;
1033	u32 blksz_depth, fifo_depth;
1034	u16 thld_size;
1035	u8 enable;
1036
1037	/*
1038	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
1039	 * in the FIFO region, so we really shouldn't access it).
1040	 */
1041	if (host->verid < DW_MMC_240A ||
1042		(host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
1043		return;
1044
1045	/*
1046	 * Card write Threshold is introduced since 2.80a
1047	 * It's used when HS400 mode is enabled.
1048	 */
1049	if (data->flags & MMC_DATA_WRITE &&
1050		host->timing != MMC_TIMING_MMC_HS400)
1051		goto disable;
1052
1053	if (data->flags & MMC_DATA_WRITE)
1054		enable = SDMMC_CARD_WR_THR_EN;
1055	else
1056		enable = SDMMC_CARD_RD_THR_EN;
1057
1058	if (host->timing != MMC_TIMING_MMC_HS200 &&
1059	    host->timing != MMC_TIMING_UHS_SDR104 &&
1060	    host->timing != MMC_TIMING_MMC_HS400)
1061		goto disable;
1062
1063	blksz_depth = blksz / (1 << host->data_shift);
1064	fifo_depth = host->fifo_depth;
1065
1066	if (blksz_depth > fifo_depth)
1067		goto disable;
1068
 1069	/*
 1070	 * If blksz_depth >= (fifo_depth >> 1), thld_size should be <= blksz.
 1071	 * If blksz_depth <  (fifo_depth >> 1), thld_size should be == blksz.
 1072	 * Currently we just choose blksz.
 1073	 */
1074	thld_size = blksz;
1075	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
1076	return;
1077
1078disable:
1079	mci_writel(host, CDTHRCTL, 0);
1080}
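/*
 * Illustrative example of the threshold above: in HS200 with
 * blksz = 512 and a 32-bit data width (data_shift = 2),
 * blksz_depth = 128.  Assuming fifo_depth = 128, the block fits in
 * the FIFO, so the read threshold is enabled with thld_size = 512:
 * CDTHRCTL = SDMMC_SET_THLD(512, SDMMC_CARD_RD_THR_EN).
 */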
1081
1082static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
1083{
1084	unsigned long irqflags;
1085	int sg_len;
1086	u32 temp;
1087
1088	host->using_dma = 0;
1089
1090	/* If we don't have a channel, we can't do DMA */
1091	if (!host->use_dma)
1092		return -ENODEV;
1093
1094	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1095	if (sg_len < 0) {
1096		host->dma_ops->stop(host);
1097		return sg_len;
1098	}
1099
1100	host->using_dma = 1;
1101
1102	if (host->use_dma == TRANS_MODE_IDMAC)
1103		dev_vdbg(host->dev,
1104			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
1105			 (unsigned long)host->sg_cpu,
1106			 (unsigned long)host->sg_dma,
1107			 sg_len);
1108
 1109	/*
 1110	 * Decide the MSIZE and RX/TX Watermark.
 1111	 * If the current block size is the same as the previous one,
 1112	 * there is no need to update FIFOTH.
 1113	 */
1114	if (host->prev_blksz != data->blksz)
1115		dw_mci_adjust_fifoth(host, data);
1116
1117	/* Enable the DMA interface */
1118	temp = mci_readl(host, CTRL);
1119	temp |= SDMMC_CTRL_DMA_ENABLE;
1120	mci_writel(host, CTRL, temp);
1121
1122	/* Disable RX/TX IRQs, let DMA handle it */
1123	spin_lock_irqsave(&host->irq_lock, irqflags);
1124	temp = mci_readl(host, INTMASK);
1125	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
1126	mci_writel(host, INTMASK, temp);
1127	spin_unlock_irqrestore(&host->irq_lock, irqflags);
1128
1129	if (host->dma_ops->start(host, sg_len)) {
1130		host->dma_ops->stop(host);
1131		/* We can't do DMA, try PIO for this one */
1132		dev_dbg(host->dev,
1133			"%s: fall back to PIO mode for current transfer\n",
1134			__func__);
1135		return -ENODEV;
1136	}
1137
1138	return 0;
1139}
1140
1141static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
1142{
1143	unsigned long irqflags;
1144	int flags = SG_MITER_ATOMIC;
1145	u32 temp;
1146
1147	data->error = -EINPROGRESS;
1148
1149	WARN_ON(host->data);
1150	host->sg = NULL;
1151	host->data = data;
1152
1153	if (data->flags & MMC_DATA_READ)
1154		host->dir_status = DW_MCI_RECV_STATUS;
1155	else
1156		host->dir_status = DW_MCI_SEND_STATUS;
1157
1158	dw_mci_ctrl_thld(host, data);
1159
1160	if (dw_mci_submit_data_dma(host, data)) {
1161		if (host->data->flags & MMC_DATA_READ)
1162			flags |= SG_MITER_TO_SG;
1163		else
1164			flags |= SG_MITER_FROM_SG;
1165
1166		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1167		host->sg = data->sg;
1168		host->part_buf_start = 0;
1169		host->part_buf_count = 0;
1170
1171		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1172
1173		spin_lock_irqsave(&host->irq_lock, irqflags);
1174		temp = mci_readl(host, INTMASK);
1175		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1176		mci_writel(host, INTMASK, temp);
1177		spin_unlock_irqrestore(&host->irq_lock, irqflags);
1178
1179		temp = mci_readl(host, CTRL);
1180		temp &= ~SDMMC_CTRL_DMA_ENABLE;
1181		mci_writel(host, CTRL, temp);
1182
 1183		/*
 1184		 * Use the initial fifoth_val for PIO mode. If wm_aligned
 1185		 * is set, we set the watermark to be the same as the data
 1186		 * size. Since the next transfer may be done in DMA mode,
 1187		 * prev_blksz should be invalidated.
 1188		 */
1189		if (host->wm_aligned)
1190			dw_mci_adjust_fifoth(host, data);
1191		else
1192			mci_writel(host, FIFOTH, host->fifoth_val);
1193		host->prev_blksz = 0;
1194	} else {
1195		/*
1196		 * Keep the current block size.
1197		 * It will be used to decide whether to update
1198		 * fifoth register next time.
1199		 */
1200		host->prev_blksz = data->blksz;
1201	}
1202}
1203
1204static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1205{
1206	struct dw_mci *host = slot->host;
1207	unsigned int clock = slot->clock;
1208	u32 div;
1209	u32 clk_en_a;
1210	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
1211
1212	/* We must continue to set bit 28 in CMD until the change is complete */
1213	if (host->state == STATE_WAITING_CMD11_DONE)
1214		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
1215
1216	slot->mmc->actual_clock = 0;
1217
1218	if (!clock) {
1219		mci_writel(host, CLKENA, 0);
1220		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1221	} else if (clock != host->current_speed || force_clkinit) {
1222		div = host->bus_hz / clock;
1223		if (host->bus_hz % clock && host->bus_hz > clock)
1224			/*
1225			 * move the + 1 after the divide to prevent
1226			 * over-clocking the card.
1227			 */
1228			div += 1;
1229
1230		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1231
1232		if ((clock != slot->__clk_old &&
1233			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
1234			force_clkinit) {
 1235			/* Silence the verbose log when called from a PM context */
1236			if (!force_clkinit)
1237				dev_info(&slot->mmc->class_dev,
 1238					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
1239					 slot->id, host->bus_hz, clock,
1240					 div ? ((host->bus_hz / div) >> 1) :
1241					 host->bus_hz, div);
1242
 1243			/*
 1244			 * If the card uses polling, display the message
 1245			 * only once, at boot time.
 1246			 */
1247			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
1248					slot->mmc->f_min == clock)
1249				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
1250		}
1251
1252		/* disable clock */
1253		mci_writel(host, CLKENA, 0);
1254		mci_writel(host, CLKSRC, 0);
1255
1256		/* inform CIU */
1257		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1258
1259		/* set clock to desired speed */
1260		mci_writel(host, CLKDIV, div);
1261
1262		/* inform CIU */
1263		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1264
1265		/* enable clock; only low power if no SDIO */
1266		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1267		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
1268			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1269		mci_writel(host, CLKENA, clk_en_a);
1270
1271		/* inform CIU */
1272		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1273
1274		/* keep the last clock value that was requested from core */
1275		slot->__clk_old = clock;
1276		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
1277					  host->bus_hz;
1278	}
1279
1280	host->current_speed = clock;
1281
1282	/* Set the current slot bus width */
1283	mci_writel(host, CTYPE, (slot->ctype << slot->id));
1284}
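/*
 * Worked divider example (illustrative): with bus_hz = 100 MHz and
 * a requested clock of 400 kHz, div = 100000000 / 400000 = 250 with
 * no remainder, then DIV_ROUND_UP(250, 2) = 125 is written to
 * CLKDIV, and actual_clock = (100000000 / 125) >> 1 = 400000 Hz.
 * A request equal to bus_hz yields div = 0, i.e. the undivided clock.
 */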
1285
1286static void dw_mci_set_data_timeout(struct dw_mci *host,
1287				    unsigned int timeout_ns)
1288{
1289	const struct dw_mci_drv_data *drv_data = host->drv_data;
1290	u32 clk_div, tmout;
1291	u64 tmp;
1292
1293	if (drv_data && drv_data->set_data_timeout)
1294		return drv_data->set_data_timeout(host, timeout_ns);
1295
1296	clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
1297	if (clk_div == 0)
1298		clk_div = 1;
1299
1300	tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
1301	tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
1302
1303	/* TMOUT[7:0] (RESPONSE_TIMEOUT) */
1304	tmout = 0xFF; /* Set maximum */
1305
1306	/* TMOUT[31:8] (DATA_TIMEOUT) */
1307	if (!tmp || tmp > 0xFFFFFF)
1308		tmout |= (0xFFFFFF << 8);
1309	else
1310		tmout |= (tmp & 0xFFFFFF) << 8;
1311
1312	mci_writel(host, TMOUT, tmout);
1313	dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
1314		timeout_ns, tmout >> 8);
1315}
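/*
 * Worked example (illustrative): for timeout_ns = 100 ms with
 * bus_hz = 50 MHz and CLKDIV = 0 (clk_div forced to 1),
 *
 *	tmp = DIV_ROUND_UP(100000000 * 50000000, NSEC_PER_SEC)
 *	    = 5000000 card clocks = 0x4C4B40,
 *
 * which fits in 24 bits, so TMOUT = 0xFF | (0x4C4B40 << 8).
 */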
1316
1317static void __dw_mci_start_request(struct dw_mci *host,
1318				   struct dw_mci_slot *slot,
1319				   struct mmc_command *cmd)
1320{
1321	struct mmc_request *mrq;
1322	struct mmc_data	*data;
1323	u32 cmdflags;
1324
1325	mrq = slot->mrq;
1326
1327	host->mrq = mrq;
1328
1329	host->pending_events = 0;
1330	host->completed_events = 0;
1331	host->cmd_status = 0;
1332	host->data_status = 0;
1333	host->dir_status = 0;
1334
1335	data = cmd->data;
1336	if (data) {
1337		dw_mci_set_data_timeout(host, data->timeout_ns);
1338		mci_writel(host, BYTCNT, data->blksz*data->blocks);
1339		mci_writel(host, BLKSIZ, data->blksz);
1340	}
1341
1342	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1343
1344	/* this is the first command, send the initialization clock */
1345	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1346		cmdflags |= SDMMC_CMD_INIT;
1347
1348	if (data) {
1349		dw_mci_submit_data(host, data);
1350		wmb(); /* drain writebuffer */
1351	}
1352
1353	dw_mci_start_command(host, cmd, cmdflags);
1354
1355	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
1356		unsigned long irqflags;
1357
1358		/*
1359		 * Databook says to fail after 2ms w/ no response, but evidence
1360		 * shows that sometimes the cmd11 interrupt takes over 130ms.
1361		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
1362		 * is just about to roll over.
1363		 *
1364		 * We do this whole thing under spinlock and only if the
1365		 * command hasn't already completed (indicating the irq
1366		 * already ran so we don't want the timeout).
1367		 */
1368		spin_lock_irqsave(&host->irq_lock, irqflags);
1369		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1370			mod_timer(&host->cmd11_timer,
1371				jiffies + msecs_to_jiffies(500) + 1);
1372		spin_unlock_irqrestore(&host->irq_lock, irqflags);
1373	}
1374
1375	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
1376}
1377
1378static void dw_mci_start_request(struct dw_mci *host,
1379				 struct dw_mci_slot *slot)
1380{
1381	struct mmc_request *mrq = slot->mrq;
1382	struct mmc_command *cmd;
1383
1384	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1385	__dw_mci_start_request(host, slot, cmd);
1386}
1387
1388/* must be called with host->lock held */
1389static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1390				 struct mmc_request *mrq)
1391{
1392	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1393		 host->state);
1394
1395	slot->mrq = mrq;
1396
1397	if (host->state == STATE_WAITING_CMD11_DONE) {
1398		dev_warn(&slot->mmc->class_dev,
1399			 "Voltage change didn't complete\n");
1400		/*
1401		 * this case isn't expected to happen, so we can
1402		 * either crash here or just try to continue on
1403		 * in the closest possible state
1404		 */
1405		host->state = STATE_IDLE;
1406	}
1407
1408	if (host->state == STATE_IDLE) {
1409		host->state = STATE_SENDING_CMD;
1410		dw_mci_start_request(host, slot);
1411	} else {
1412		list_add_tail(&slot->queue_node, &host->queue);
1413	}
1414}
1415
1416static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1417{
1418	struct dw_mci_slot *slot = mmc_priv(mmc);
1419	struct dw_mci *host = slot->host;
1420
1421	WARN_ON(slot->mrq);
1422
1423	/*
1424	 * The check for card presence and queueing of the request must be
1425	 * atomic, otherwise the card could be removed in between and the
1426	 * request wouldn't fail until another card was inserted.
1427	 */
1428
1429	if (!dw_mci_get_cd(mmc)) {
1430		mrq->cmd->error = -ENOMEDIUM;
1431		mmc_request_done(mmc, mrq);
1432		return;
1433	}
1434
1435	spin_lock_bh(&host->lock);
1436
1437	dw_mci_queue_request(host, slot, mrq);
1438
1439	spin_unlock_bh(&host->lock);
1440}
1441
1442static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1443{
1444	struct dw_mci_slot *slot = mmc_priv(mmc);
1445	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1446	u32 regs;
1447	int ret;
1448
1449	switch (ios->bus_width) {
1450	case MMC_BUS_WIDTH_4:
1451		slot->ctype = SDMMC_CTYPE_4BIT;
1452		break;
1453	case MMC_BUS_WIDTH_8:
1454		slot->ctype = SDMMC_CTYPE_8BIT;
1455		break;
1456	default:
1457		/* set default 1 bit mode */
1458		slot->ctype = SDMMC_CTYPE_1BIT;
1459	}
1460
1461	regs = mci_readl(slot->host, UHS_REG);
1462
1463	/* DDR mode set */
1464	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
1465	    ios->timing == MMC_TIMING_UHS_DDR50 ||
1466	    ios->timing == MMC_TIMING_MMC_HS400)
1467		regs |= ((0x1 << slot->id) << 16);
1468	else
1469		regs &= ~((0x1 << slot->id) << 16);
1470
1471	mci_writel(slot->host, UHS_REG, regs);
1472	slot->host->timing = ios->timing;
1473
1474	/*
1475	 * Use mirror of ios->clock to prevent race with mmc
1476	 * core ios update when finding the minimum.
1477	 */
1478	slot->clock = ios->clock;
1479
1480	if (drv_data && drv_data->set_ios)
1481		drv_data->set_ios(slot->host, ios);
1482
1483	switch (ios->power_mode) {
1484	case MMC_POWER_UP:
1485		if (!IS_ERR(mmc->supply.vmmc)) {
1486			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1487					ios->vdd);
1488			if (ret) {
1489				dev_err(slot->host->dev,
1490					"failed to enable vmmc regulator\n");
1491				/*return, if failed turn on vmmc*/
1492				return;
1493			}
1494		}
1495		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1496		regs = mci_readl(slot->host, PWREN);
1497		regs |= (1 << slot->id);
1498		mci_writel(slot->host, PWREN, regs);
1499		break;
1500	case MMC_POWER_ON:
1501		if (!slot->host->vqmmc_enabled) {
1502			if (!IS_ERR(mmc->supply.vqmmc)) {
1503				ret = regulator_enable(mmc->supply.vqmmc);
1504				if (ret < 0)
1505					dev_err(slot->host->dev,
1506						"failed to enable vqmmc\n");
1507				else
1508					slot->host->vqmmc_enabled = true;
1509
1510			} else {
1511				/* Keep track so we don't reset again */
1512				slot->host->vqmmc_enabled = true;
1513			}
1514
1515			/* Reset our state machine after powering on */
1516			dw_mci_ctrl_reset(slot->host,
1517					  SDMMC_CTRL_ALL_RESET_FLAGS);
1518		}
1519
1520		/* Adjust clock / bus width after power is up */
1521		dw_mci_setup_bus(slot, false);
1522
1523		break;
1524	case MMC_POWER_OFF:
1525		/* Turn clock off before power goes down */
1526		dw_mci_setup_bus(slot, false);
1527
1528		if (!IS_ERR(mmc->supply.vmmc))
1529			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1530
1531		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
1532			regulator_disable(mmc->supply.vqmmc);
1533		slot->host->vqmmc_enabled = false;
1534
1535		regs = mci_readl(slot->host, PWREN);
1536		regs &= ~(1 << slot->id);
1537		mci_writel(slot->host, PWREN, regs);
1538		break;
1539	default:
1540		break;
1541	}
1542
1543	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1544		slot->host->state = STATE_IDLE;
1545}
1546
1547static int dw_mci_card_busy(struct mmc_host *mmc)
1548{
1549	struct dw_mci_slot *slot = mmc_priv(mmc);
1550	u32 status;
1551
1552	/*
1553	 * Check the busy bit which is low when DAT[3:0]
1554	 * (the data lines) are 0000
1555	 */
1556	status = mci_readl(slot->host, STATUS);
1557
1558	return !!(status & SDMMC_STATUS_BUSY);
1559}
1560
1561static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1562{
1563	struct dw_mci_slot *slot = mmc_priv(mmc);
1564	struct dw_mci *host = slot->host;
1565	const struct dw_mci_drv_data *drv_data = host->drv_data;
1566	u32 uhs;
1567	u32 v18 = SDMMC_UHS_18V << slot->id;
1568	int ret;
1569
1570	if (drv_data && drv_data->switch_voltage)
1571		return drv_data->switch_voltage(mmc, ios);
1572
1573	/*
1574	 * Program the voltage.  Note that some instances of dw_mmc may use
1575	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
1576	 * does no harm but you need to set the regulator directly.  Try both.
1577	 */
1578	uhs = mci_readl(host, UHS_REG);
1579	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1580		uhs &= ~v18;
1581	else
1582		uhs |= v18;
1583
1584	if (!IS_ERR(mmc->supply.vqmmc)) {
1585		ret = mmc_regulator_set_vqmmc(mmc, ios);
1586		if (ret < 0) {
1587			dev_dbg(&mmc->class_dev,
1588					 "Regulator set error %d - %s V\n",
1589					 ret, uhs & v18 ? "1.8" : "3.3");
1590			return ret;
1591		}
1592	}
1593	mci_writel(host, UHS_REG, uhs);
1594
1595	return 0;
1596}
1597
1598static int dw_mci_get_ro(struct mmc_host *mmc)
1599{
1600	int read_only;
1601	struct dw_mci_slot *slot = mmc_priv(mmc);
1602	int gpio_ro = mmc_gpio_get_ro(mmc);
1603
1604	/* Use platform get_ro function, else try on board write protect */
1605	if (gpio_ro >= 0)
1606		read_only = gpio_ro;
1607	else
1608		read_only =
1609			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1610
1611	dev_dbg(&mmc->class_dev, "card is %s\n",
1612		read_only ? "read-only" : "read-write");
1613
1614	return read_only;
1615}
1616
1617static void dw_mci_hw_reset(struct mmc_host *mmc)
1618{
1619	struct dw_mci_slot *slot = mmc_priv(mmc);
1620	struct dw_mci *host = slot->host;
1621	int reset;
1622
1623	if (host->use_dma == TRANS_MODE_IDMAC)
1624		dw_mci_idmac_reset(host);
1625
1626	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
1627				     SDMMC_CTRL_FIFO_RESET))
1628		return;
1629
1630	/*
1631	 * According to eMMC spec, card reset procedure:
1632	 * tRstW >= 1us:   RST_n pulse width
1633	 * tRSCA >= 200us: RST_n to Command time
1634	 * tRSTH >= 1us:   RST_n high period
1635	 */
1636	reset = mci_readl(host, RST_N);
1637	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
1638	mci_writel(host, RST_N, reset);
1639	usleep_range(1, 2);
1640	reset |= SDMMC_RST_HWACTIVE << slot->id;
1641	mci_writel(host, RST_N, reset);
1642	usleep_range(200, 300);
1643}
1644
1645static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
1646{
1647	struct dw_mci *host = slot->host;
1648	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1649	u32 clk_en_a_old;
1650	u32 clk_en_a;
1651
1652	/*
1653	 * Low power mode will stop the card clock when idle.  According to the
1654	 * description of the CLKENA register we should disable low power mode
1655	 * for SDIO cards if we need SDIO interrupts to work.
1656	 */
1657
1658	clk_en_a_old = mci_readl(host, CLKENA);
1659	if (prepare) {
1660		set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1661		clk_en_a = clk_en_a_old & ~clken_low_pwr;
1662	} else {
1663		clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1664		clk_en_a = clk_en_a_old | clken_low_pwr;
1665	}
1666
1667	if (clk_en_a != clk_en_a_old) {
1668		mci_writel(host, CLKENA, clk_en_a);
1669		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
1670			     0);
1671	}
1672}
1673
1674static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
1675{
1676	struct dw_mci *host = slot->host;
1677	unsigned long irqflags;
1678	u32 int_mask;
1679
1680	spin_lock_irqsave(&host->irq_lock, irqflags);
1681
1682	/* Enable/disable Slot Specific SDIO interrupt */
1683	int_mask = mci_readl(host, INTMASK);
1684	if (enb)
1685		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1686	else
1687		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1688	mci_writel(host, INTMASK, int_mask);
1689
1690	spin_unlock_irqrestore(&host->irq_lock, irqflags);
1691}
1692
1693static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1694{
1695	struct dw_mci_slot *slot = mmc_priv(mmc);
1696	struct dw_mci *host = slot->host;
1697
1698	dw_mci_prepare_sdio_irq(slot, enb);
1699	__dw_mci_enable_sdio_irq(slot, enb);
1700
1701	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
1702	if (enb)
1703		pm_runtime_get_noresume(host->dev);
1704	else
1705		pm_runtime_put_noidle(host->dev);
1706}
1707
1708static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
1709{
1710	struct dw_mci_slot *slot = mmc_priv(mmc);
1711
1712	__dw_mci_enable_sdio_irq(slot, 1);
1713}
1714
1715static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1716{
1717	struct dw_mci_slot *slot = mmc_priv(mmc);
1718	struct dw_mci *host = slot->host;
1719	const struct dw_mci_drv_data *drv_data = host->drv_data;
1720	int err = -EINVAL;
1721
1722	if (drv_data && drv_data->execute_tuning)
1723		err = drv_data->execute_tuning(slot, opcode);
1724	return err;
1725}
1726
1727static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1728				       struct mmc_ios *ios)
1729{
1730	struct dw_mci_slot *slot = mmc_priv(mmc);
1731	struct dw_mci *host = slot->host;
1732	const struct dw_mci_drv_data *drv_data = host->drv_data;
1733
1734	if (drv_data && drv_data->prepare_hs400_tuning)
1735		return drv_data->prepare_hs400_tuning(host, ios);
1736
1737	return 0;
1738}
1739
1740static bool dw_mci_reset(struct dw_mci *host)
1741{
1742	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
1743	bool ret = false;
1744	u32 status = 0;
1745
1746	/*
1747	 * Resetting generates a block interrupt, hence setting
1748	 * the scatter-gather pointer to NULL.
1749	 */
1750	if (host->sg) {
1751		sg_miter_stop(&host->sg_miter);
1752		host->sg = NULL;
1753	}
1754
1755	if (host->use_dma)
1756		flags |= SDMMC_CTRL_DMA_RESET;
1757
1758	if (dw_mci_ctrl_reset(host, flags)) {
1759		/*
1760		 * In all cases we clear the RAWINTS
1761		 * register to clear any interrupts.
1762		 */
1763		mci_writel(host, RINTSTS, 0xFFFFFFFF);
1764
1765		if (!host->use_dma) {
1766			ret = true;
1767			goto ciu_out;
1768		}
1769
1770		/* Wait for dma_req to be cleared */
1771		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
1772					      status,
1773					      !(status & SDMMC_STATUS_DMA_REQ),
1774					      1, 500 * USEC_PER_MSEC)) {
1775			dev_err(host->dev,
1776				"%s: Timeout waiting for dma_req to be cleared\n",
1777				__func__);
1778			goto ciu_out;
1779		}
1780
1781		/* when using DMA next we reset the fifo again */
1782		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
1783			goto ciu_out;
1784	} else {
1785		/* if the controller reset bit did clear, then set clock regs */
1786		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
1787			dev_err(host->dev,
1788				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1789				__func__);
1790			goto ciu_out;
1791		}
1792	}
1793
1794	if (host->use_dma == TRANS_MODE_IDMAC)
1795		/* It is also required that we reinit idmac */
1796		dw_mci_idmac_init(host);
1797
1798	ret = true;
1799
1800ciu_out:
 1801	/* After a CTRL reset we need to have CIU set clock registers */
1802	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
1803
1804	return ret;
1805}
1806
1807static const struct mmc_host_ops dw_mci_ops = {
1808	.request		= dw_mci_request,
1809	.pre_req		= dw_mci_pre_req,
1810	.post_req		= dw_mci_post_req,
1811	.set_ios		= dw_mci_set_ios,
1812	.get_ro			= dw_mci_get_ro,
1813	.get_cd			= dw_mci_get_cd,
1814	.card_hw_reset          = dw_mci_hw_reset,
1815	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
1816	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
1817	.execute_tuning		= dw_mci_execute_tuning,
1818	.card_busy		= dw_mci_card_busy,
1819	.start_signal_voltage_switch = dw_mci_switch_voltage,
1820	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
1821};
1822
1823#ifdef CONFIG_FAULT_INJECTION
1824static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
1825{
1826	struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
1827	unsigned long flags;
1828
1829	spin_lock_irqsave(&host->irq_lock, flags);
1830
1831	/*
1832	 * Only inject an error if we haven't already got an error or data over
1833	 * interrupt.
1834	 */
1835	if (!host->data_status) {
1836		host->data_status = SDMMC_INT_DCRC;
1837		set_bit(EVENT_DATA_ERROR, &host->pending_events);
1838		tasklet_schedule(&host->tasklet);
1839	}
1840
1841	spin_unlock_irqrestore(&host->irq_lock, flags);
1842
1843	return HRTIMER_NORESTART;
1844}
1845
1846static void dw_mci_start_fault_timer(struct dw_mci *host)
1847{
1848	struct mmc_data *data = host->data;
1849
1850	if (!data || data->blocks <= 1)
1851		return;
1852
1853	if (!should_fail(&host->fail_data_crc, 1))
1854		return;
1855
1856	/*
1857	 * Try to inject the error at random points during the data transfer.
1858	 */
1859	hrtimer_start(&host->fault_timer,
1860		      ms_to_ktime(get_random_u32_below(25)),
1861		      HRTIMER_MODE_REL);
1862}
1863
1864static void dw_mci_stop_fault_timer(struct dw_mci *host)
1865{
1866	hrtimer_cancel(&host->fault_timer);
1867}
1868
1869static void dw_mci_init_fault(struct dw_mci *host)
1870{
1871	host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
1872
1873	hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1874	host->fault_timer.function = dw_mci_fault_timer;
1875}
1876#else
1877static void dw_mci_init_fault(struct dw_mci *host)
1878{
1879}
1880
1881static void dw_mci_start_fault_timer(struct dw_mci *host)
1882{
1883}
1884
1885static void dw_mci_stop_fault_timer(struct dw_mci *host)
1886{
1887}
1888#endif
1889
1890static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1891	__releases(&host->lock)
1892	__acquires(&host->lock)
1893{
1894	struct dw_mci_slot *slot;
1895	struct mmc_host	*prev_mmc = host->slot->mmc;
1896
1897	WARN_ON(host->cmd || host->data);
1898
1899	host->slot->mrq = NULL;
1900	host->mrq = NULL;
1901	if (!list_empty(&host->queue)) {
1902		slot = list_entry(host->queue.next,
1903				  struct dw_mci_slot, queue_node);
1904		list_del(&slot->queue_node);
1905		dev_vdbg(host->dev, "list not empty: %s is next\n",
1906			 mmc_hostname(slot->mmc));
1907		host->state = STATE_SENDING_CMD;
1908		dw_mci_start_request(host, slot);
1909	} else {
1910		dev_vdbg(host->dev, "list empty\n");
1911
1912		if (host->state == STATE_SENDING_CMD11)
1913			host->state = STATE_WAITING_CMD11_DONE;
1914		else
1915			host->state = STATE_IDLE;
1916	}
1917
1918	spin_unlock(&host->lock);
1919	mmc_request_done(prev_mmc, mrq);
1920	spin_lock(&host->lock);
1921}
1922
1923static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1924{
1925	u32 status = host->cmd_status;
1926
1927	host->cmd_status = 0;
1928
1929	/* Read the response from the card (up to 16 bytes) */
1930	if (cmd->flags & MMC_RSP_PRESENT) {
1931		if (cmd->flags & MMC_RSP_136) {
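			/*
			 * The controller returns the most significant 32 bits
			 * of a 136-bit response in RESP3, so fill resp[] in
			 * reverse register order.
			 */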
1932			cmd->resp[3] = mci_readl(host, RESP0);
1933			cmd->resp[2] = mci_readl(host, RESP1);
1934			cmd->resp[1] = mci_readl(host, RESP2);
1935			cmd->resp[0] = mci_readl(host, RESP3);
1936		} else {
1937			cmd->resp[0] = mci_readl(host, RESP0);
1938			cmd->resp[1] = 0;
1939			cmd->resp[2] = 0;
1940			cmd->resp[3] = 0;
1941		}
1942	}
1943
1944	if (status & SDMMC_INT_RTO)
1945		cmd->error = -ETIMEDOUT;
1946	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1947		cmd->error = -EILSEQ;
1948	else if (status & SDMMC_INT_RESP_ERR)
1949		cmd->error = -EIO;
1950	else
1951		cmd->error = 0;
1952
1953	return cmd->error;
1954}
1955
1956static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1957{
1958	u32 status = host->data_status;
1959
1960	if (status & DW_MCI_DATA_ERROR_FLAGS) {
1961		if (status & SDMMC_INT_DRTO) {
1962			data->error = -ETIMEDOUT;
1963		} else if (status & SDMMC_INT_DCRC) {
1964			data->error = -EILSEQ;
1965		} else if (status & SDMMC_INT_EBE) {
1966			if (host->dir_status ==
1967				DW_MCI_SEND_STATUS) {
1968				/*
1969				 * No data CRC status was returned.
1970				 * The number of bytes transferred
1971				 * will be exaggerated in PIO mode.
1972				 */
1973				data->bytes_xfered = 0;
1974				data->error = -ETIMEDOUT;
1975			} else if (host->dir_status ==
1976					DW_MCI_RECV_STATUS) {
1977				data->error = -EILSEQ;
1978			}
1979		} else {
1980			/* SDMMC_INT_SBE is included */
1981			data->error = -EILSEQ;
1982		}
1983
1984		dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1985
1986		/*
1987		 * After an error, there may be data lingering
1988		 * in the FIFO
1989		 */
1990		dw_mci_reset(host);
1991	} else {
1992		data->bytes_xfered = data->blocks * data->blksz;
1993		data->error = 0;
1994	}
1995
1996	return data->error;
1997}
1998
1999static void dw_mci_set_drto(struct dw_mci *host)
2000{
2001	const struct dw_mci_drv_data *drv_data = host->drv_data;
2002	unsigned int drto_clks;
2003	unsigned int drto_div;
2004	unsigned int drto_ms;
2005	unsigned long irqflags;
2006
2007	if (drv_data && drv_data->get_drto_clks)
2008		drto_clks = drv_data->get_drto_clks(host);
2009	else
2010		drto_clks = mci_readl(host, TMOUT) >> 8;
2011	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
2012	if (drto_div == 0)
2013		drto_div = 1;
2014
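	/*
	 * CLKDIV holds the divider value / 2 (0 means divide-by-1), and the
	 * timeout is counted in card clock cycles, so convert cycles to
	 * milliseconds against bus_hz, rounding up so the timer never fires
	 * before the hardware timeout would.
	 */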
2015	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
2016				   host->bus_hz);
2017
2018	dev_dbg(host->dev, "drto_ms: %u\n", drto_ms);
2019
2020		/* add a bit of spare time */
2021	drto_ms += 10;
2022
2023	spin_lock_irqsave(&host->irq_lock, irqflags);
2024	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
2025		mod_timer(&host->dto_timer,
2026			  jiffies + msecs_to_jiffies(drto_ms));
2027	spin_unlock_irqrestore(&host->irq_lock, irqflags);
2028}
2029
2030static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
2031{
2032	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2033		return false;
2034
2035	/*
2036	 * Really be certain that the timer has stopped.  This is a bit of
2037	 * paranoia and could only really happen if we had really bad
2038	 * interrupt latency and the interrupt routine and timeout were
2039	 * running concurrently so that the del_timer() in the interrupt
2040	 * handler couldn't run.
2041	 */
2042	WARN_ON(del_timer_sync(&host->cto_timer));
2043	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2044
2045	return true;
2046}
2047
2048static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
2049{
2050	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
2051		return false;
2052
2053	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
2054	WARN_ON(del_timer_sync(&host->dto_timer));
2055	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2056
2057	return true;
2058}
2059
2060static void dw_mci_tasklet_func(struct tasklet_struct *t)
2061{
2062	struct dw_mci *host = from_tasklet(host, t, tasklet);
2063	struct mmc_data	*data;
2064	struct mmc_command *cmd;
2065	struct mmc_request *mrq;
2066	enum dw_mci_state state;
2067	enum dw_mci_state prev_state;
2068	unsigned int err;
2069
2070	spin_lock(&host->lock);
2071
2072	state = host->state;
2073	data = host->data;
2074	mrq = host->mrq;
2075
2076	do {
2077		prev_state = state;
2078
2079		switch (state) {
2080		case STATE_IDLE:
2081		case STATE_WAITING_CMD11_DONE:
2082			break;
2083
2084		case STATE_SENDING_CMD11:
2085		case STATE_SENDING_CMD:
2086			if (!dw_mci_clear_pending_cmd_complete(host))
2087				break;
2088
2089			cmd = host->cmd;
2090			host->cmd = NULL;
2091			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2092			err = dw_mci_command_complete(host, cmd);
2093			if (cmd == mrq->sbc && !err) {
2094				__dw_mci_start_request(host, host->slot,
2095						       mrq->cmd);
2096				goto unlock;
2097			}
2098
2099			if (cmd->data && err) {
2100				/*
2101				 * During UHS tuning sequence, sending the stop
2102				 * command after the response CRC error would
2103				 * throw the system into a confused state
2104				 * causing all future tuning phases to report
2105				 * failure.
2106				 *
2107				 * In such case controller will move into a data
2108				 * transfer state after a response error or
2109				 * response CRC error. Let's let that finish
2110				 * before trying to send a stop, so we'll go to
2111				 * STATE_SENDING_DATA.
2112				 *
2113				 * Although letting the data transfer take place
2114				 * will waste a bit of time (we already know
2115				 * the command was bad), it can't cause any
2116				 * errors since it's possible it would have
2117				 * taken place anyway if this tasklet got
2118				 * delayed. Allowing the transfer to take place
2119				 * avoids races and keeps things simple.
2120				 */
2121				if (err != -ETIMEDOUT &&
2122				    host->dir_status == DW_MCI_RECV_STATUS) {
2123					state = STATE_SENDING_DATA;
2124					continue;
2125				}
2126
2127				send_stop_abort(host, data);
2128				dw_mci_stop_dma(host);
2129				state = STATE_SENDING_STOP;
2130				break;
2131			}
2132
2133			if (!cmd->data || err) {
2134				dw_mci_request_end(host, mrq);
2135				goto unlock;
2136			}
2137
2138			prev_state = state = STATE_SENDING_DATA;
2139			fallthrough;
2140
2141		case STATE_SENDING_DATA:
2142			/*
2143			 * We could get a data error and never a transfer
2144			 * complete so we'd better check for it here.
2145			 *
2146			 * Note that we don't really care if we also got a
2147			 * transfer complete; stopping the DMA and sending an
2148			 * abort won't hurt.
2149			 */
2150			if (test_and_clear_bit(EVENT_DATA_ERROR,
2151					       &host->pending_events)) {
2152				if (!(host->data_status & (SDMMC_INT_DRTO |
2153							   SDMMC_INT_EBE)))
2154					send_stop_abort(host, data);
2155				dw_mci_stop_dma(host);
2156				state = STATE_DATA_ERROR;
2157				break;
2158			}
2159
2160			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2161						&host->pending_events)) {
2162				/*
2163				 * No data-related interrupt arrived in time
2164				 * while in the reading state; arm the timeout.
2165				 */
2166				if (host->dir_status == DW_MCI_RECV_STATUS)
2167					dw_mci_set_drto(host);
2168				break;
2169			}
2170
2171			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2172
2173			/*
2174			 * Handle an EVENT_DATA_ERROR that might have shown up
2175			 * before the transfer completed.  This might not have
2176			 * been caught by the check above because the interrupt
2177			 * could have gone off between the previous check and
2178			 * the check for transfer complete.
2179			 *
2180			 * Technically this ought not be needed assuming we
2181			 * get a DATA_COMPLETE eventually (we'll notice the
2182			 * error and end the request), but it shouldn't hurt.
2183			 *
2184			 * This has the advantage of sending the stop command.
2185			 */
2186			if (test_and_clear_bit(EVENT_DATA_ERROR,
2187					       &host->pending_events)) {
2188				if (!(host->data_status & (SDMMC_INT_DRTO |
2189							   SDMMC_INT_EBE)))
2190					send_stop_abort(host, data);
2191				dw_mci_stop_dma(host);
2192				state = STATE_DATA_ERROR;
2193				break;
2194			}
2195			prev_state = state = STATE_DATA_BUSY;
2196
2197			fallthrough;
2198
2199		case STATE_DATA_BUSY:
2200			if (!dw_mci_clear_pending_data_complete(host)) {
2201				/*
2202				 * The data error interrupt came but the data
2203				 * over interrupt didn't arrive within the
2204				 * given time while reading data; arm the timeout.
2205				 */
2206				if (host->dir_status == DW_MCI_RECV_STATUS)
2207					dw_mci_set_drto(host);
2208				break;
2209			}
2210
2211			dw_mci_stop_fault_timer(host);
2212			host->data = NULL;
2213			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2214			err = dw_mci_data_complete(host, data);
2215
2216			if (!err) {
2217				if (!data->stop || mrq->sbc) {
2218					if (mrq->sbc && data->stop)
2219						data->stop->error = 0;
2220					dw_mci_request_end(host, mrq);
2221					goto unlock;
2222				}
2223
2224				/* stop command for open-ended transfer */
2225				if (data->stop)
2226					send_stop_abort(host, data);
2227			} else {
2228				/*
2229				 * If we don't have a command complete now we'll
2230				 * never get one since we just reset everything;
2231				 * better end the request.
2232				 *
2233				 * If we do have a command complete we'll fall
2234				 * through to the SENDING_STOP command and
2235				 * everything will be peachy keen.
2236				 */
2237				if (!test_bit(EVENT_CMD_COMPLETE,
2238					      &host->pending_events)) {
2239					host->cmd = NULL;
2240					dw_mci_request_end(host, mrq);
2241					goto unlock;
2242				}
2243			}
2244
2245			/*
2246			 * If err is non-zero, the stop/abort command
2247			 * has already been issued.
2248			 */
2249			prev_state = state = STATE_SENDING_STOP;
2250
2251			fallthrough;
2252
2253		case STATE_SENDING_STOP:
2254			if (!dw_mci_clear_pending_cmd_complete(host))
2255				break;
2256
2257			/* CMD error in data command */
2258			if (mrq->cmd->error && mrq->data)
2259				dw_mci_reset(host);
2260
2261			dw_mci_stop_fault_timer(host);
2262			host->cmd = NULL;
2263			host->data = NULL;
2264
2265			if (!mrq->sbc && mrq->stop)
2266				dw_mci_command_complete(host, mrq->stop);
2267			else
2268				host->cmd_status = 0;
2269
2270			dw_mci_request_end(host, mrq);
2271			goto unlock;
2272
2273		case STATE_DATA_ERROR:
2274			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2275						&host->pending_events))
2276				break;
2277
2278			state = STATE_DATA_BUSY;
2279			break;
2280		}
2281	} while (state != prev_state);
2282
2283	host->state = state;
2284unlock:
2285	spin_unlock(&host->lock);
2286
2287}
2288
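/*
 * PIO transfers move whole FIFO words (16/32/64 bits depending on the host
 * data width), so bytes that don't fill a complete word are staged in
 * host->part_buf between calls by the helpers below.
 */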
2289/* push final bytes to part_buf, only use during push */
2290static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2291{
2292	memcpy((void *)&host->part_buf, buf, cnt);
2293	host->part_buf_count = cnt;
2294}
2295
2296/* append bytes to part_buf, only use during push */
2297static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2298{
2299	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2300	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2301	host->part_buf_count += cnt;
2302	return cnt;
2303}
2304
2305/* pull first bytes from part_buf, only use during pull */
2306static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2307{
2308	cnt = min_t(int, cnt, host->part_buf_count);
2309	if (cnt) {
2310		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2311		       cnt);
2312		host->part_buf_count -= cnt;
2313		host->part_buf_start += cnt;
2314	}
2315	return cnt;
2316}
2317
2318/* pull final bytes from the part_buf, assuming it's just been filled */
2319static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2320{
2321	memcpy(buf, &host->part_buf, cnt);
2322	host->part_buf_start = cnt;
2323	host->part_buf_count = (1 << host->data_shift) - cnt;
2324}
2325
2326static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2327{
2328	struct mmc_data *data = host->data;
2329	int init_cnt = cnt;
2330
2331	/* try and push anything in the part_buf */
2332	if (unlikely(host->part_buf_count)) {
2333		int len = dw_mci_push_part_bytes(host, buf, cnt);
2334
2335		buf += len;
2336		cnt -= len;
2337		if (host->part_buf_count == 2) {
2338			mci_fifo_writew(host->fifo_reg, host->part_buf16);
2339			host->part_buf_count = 0;
2340		}
2341	}
2342#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2343	if (unlikely((unsigned long)buf & 0x1)) {
2344		while (cnt >= 2) {
2345			u16 aligned_buf[64];
2346			int len = min(cnt & -2, (int)sizeof(aligned_buf));
2347			int items = len >> 1;
2348			int i;
2349			/* memcpy from input buffer into aligned buffer */
2350			memcpy(aligned_buf, buf, len);
2351			buf += len;
2352			cnt -= len;
2353			/* push data from aligned buffer into fifo */
2354			for (i = 0; i < items; ++i)
2355				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2356		}
2357	} else
2358#endif
2359	{
2360		u16 *pdata = buf;
2361
2362		for (; cnt >= 2; cnt -= 2)
2363			mci_fifo_writew(host->fifo_reg, *pdata++);
2364		buf = pdata;
2365	}
2366	/* put anything remaining in the part_buf */
2367	if (cnt) {
2368		dw_mci_set_part_bytes(host, buf, cnt);
2369		 /* Push data if we have reached the expected data length */
2370		if ((data->bytes_xfered + init_cnt) ==
2371		    (data->blksz * data->blocks))
2372			mci_fifo_writew(host->fifo_reg, host->part_buf16);
2373	}
2374}
2375
2376static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2377{
2378#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2379	if (unlikely((unsigned long)buf & 0x1)) {
2380		while (cnt >= 2) {
2381			/* pull data from fifo into aligned buffer */
2382			u16 aligned_buf[64];
2383			int len = min(cnt & -2, (int)sizeof(aligned_buf));
2384			int items = len >> 1;
2385			int i;
2386
2387			for (i = 0; i < items; ++i)
2388				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2389			/* memcpy from aligned buffer into output buffer */
2390			memcpy(buf, aligned_buf, len);
2391			buf += len;
2392			cnt -= len;
2393		}
2394	} else
2395#endif
2396	{
2397		u16 *pdata = buf;
2398
2399		for (; cnt >= 2; cnt -= 2)
2400			*pdata++ = mci_fifo_readw(host->fifo_reg);
2401		buf = pdata;
2402	}
2403	if (cnt) {
2404		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2405		dw_mci_pull_final_bytes(host, buf, cnt);
2406	}
2407}
2408
2409static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2410{
2411	struct mmc_data *data = host->data;
2412	int init_cnt = cnt;
2413
2414	/* try and push anything in the part_buf */
2415	if (unlikely(host->part_buf_count)) {
2416		int len = dw_mci_push_part_bytes(host, buf, cnt);
2417
2418		buf += len;
2419		cnt -= len;
2420		if (host->part_buf_count == 4) {
2421			mci_fifo_writel(host->fifo_reg,	host->part_buf32);
2422			host->part_buf_count = 0;
2423		}
2424	}
2425#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2426	if (unlikely((unsigned long)buf & 0x3)) {
2427		while (cnt >= 4) {
2428			u32 aligned_buf[32];
2429			int len = min(cnt & -4, (int)sizeof(aligned_buf));
2430			int items = len >> 2;
2431			int i;
2432			/* memcpy from input buffer into aligned buffer */
2433			memcpy(aligned_buf, buf, len);
2434			buf += len;
2435			cnt -= len;
2436			/* push data from aligned buffer into fifo */
2437			for (i = 0; i < items; ++i)
2438				mci_fifo_writel(host->fifo_reg,	aligned_buf[i]);
2439		}
2440	} else
2441#endif
2442	{
2443		u32 *pdata = buf;
2444
2445		for (; cnt >= 4; cnt -= 4)
2446			mci_fifo_writel(host->fifo_reg, *pdata++);
2447		buf = pdata;
2448	}
2449	/* put anything remaining in the part_buf */
2450	if (cnt) {
2451		dw_mci_set_part_bytes(host, buf, cnt);
2452		 /* Push data if we have reached the expected data length */
2453		if ((data->bytes_xfered + init_cnt) ==
2454		    (data->blksz * data->blocks))
2455			mci_fifo_writel(host->fifo_reg, host->part_buf32);
2456	}
2457}
2458
2459static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2460{
2461#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2462	if (unlikely((unsigned long)buf & 0x3)) {
2463		while (cnt >= 4) {
2464			/* pull data from fifo into aligned buffer */
2465			u32 aligned_buf[32];
2466			int len = min(cnt & -4, (int)sizeof(aligned_buf));
2467			int items = len >> 2;
2468			int i;
2469
2470			for (i = 0; i < items; ++i)
2471				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2472			/* memcpy from aligned buffer into output buffer */
2473			memcpy(buf, aligned_buf, len);
2474			buf += len;
2475			cnt -= len;
2476		}
2477	} else
2478#endif
2479	{
2480		u32 *pdata = buf;
2481
2482		for (; cnt >= 4; cnt -= 4)
2483			*pdata++ = mci_fifo_readl(host->fifo_reg);
2484		buf = pdata;
2485	}
2486	if (cnt) {
2487		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2488		dw_mci_pull_final_bytes(host, buf, cnt);
2489	}
2490}
2491
2492static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2493{
2494	struct mmc_data *data = host->data;
2495	int init_cnt = cnt;
2496
2497	/* try and push anything in the part_buf */
2498	if (unlikely(host->part_buf_count)) {
2499		int len = dw_mci_push_part_bytes(host, buf, cnt);
2500
2501		buf += len;
2502		cnt -= len;
2503
2504		if (host->part_buf_count == 8) {
2505			mci_fifo_writeq(host->fifo_reg,	host->part_buf);
2506			host->part_buf_count = 0;
2507		}
2508	}
2509#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2510	if (unlikely((unsigned long)buf & 0x7)) {
2511		while (cnt >= 8) {
2512			u64 aligned_buf[16];
2513			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2514			int items = len >> 3;
2515			int i;
2516			/* memcpy from input buffer into aligned buffer */
2517			memcpy(aligned_buf, buf, len);
2518			buf += len;
2519			cnt -= len;
2520			/* push data from aligned buffer into fifo */
2521			for (i = 0; i < items; ++i)
2522				mci_fifo_writeq(host->fifo_reg,	aligned_buf[i]);
2523		}
2524	} else
2525#endif
2526	{
2527		u64 *pdata = buf;
2528
2529		for (; cnt >= 8; cnt -= 8)
2530			mci_fifo_writeq(host->fifo_reg, *pdata++);
2531		buf = pdata;
2532	}
2533	/* put anything remaining in the part_buf */
2534	if (cnt) {
2535		dw_mci_set_part_bytes(host, buf, cnt);
2536		/* Push data if we have reached the expected data length */
2537		if ((data->bytes_xfered + init_cnt) ==
2538		    (data->blksz * data->blocks))
2539			mci_fifo_writeq(host->fifo_reg, host->part_buf);
2540	}
2541}
2542
2543static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2544{
2545#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2546	if (unlikely((unsigned long)buf & 0x7)) {
2547		while (cnt >= 8) {
2548			/* pull data from fifo into aligned buffer */
2549			u64 aligned_buf[16];
2550			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2551			int items = len >> 3;
2552			int i;
2553
2554			for (i = 0; i < items; ++i)
2555				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2556
2557			/* memcpy from aligned buffer into output buffer */
2558			memcpy(buf, aligned_buf, len);
2559			buf += len;
2560			cnt -= len;
2561		}
2562	} else
2563#endif
2564	{
2565		u64 *pdata = buf;
2566
2567		for (; cnt >= 8; cnt -= 8)
2568			*pdata++ = mci_fifo_readq(host->fifo_reg);
2569		buf = pdata;
2570	}
2571	if (cnt) {
2572		host->part_buf = mci_fifo_readq(host->fifo_reg);
2573		dw_mci_pull_final_bytes(host, buf, cnt);
2574	}
2575}
2576
2577static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2578{
2579	int len;
2580
2581	/* get remaining partial bytes */
2582	len = dw_mci_pull_part_bytes(host, buf, cnt);
2583	if (unlikely(len == cnt))
2584		return;
2585	buf += len;
2586	cnt -= len;
2587
2588	/* get the rest of the data */
2589	host->pull_data(host, buf, cnt);
2590}
2591
2592static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2593{
2594	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2595	void *buf;
2596	unsigned int offset;
2597	struct mmc_data	*data = host->data;
2598	int shift = host->data_shift;
2599	u32 status;
2600	unsigned int len;
2601	unsigned int remain, fcnt;
2602
2603	do {
2604		if (!sg_miter_next(sg_miter))
2605			goto done;
2606
2607		host->sg = sg_miter->piter.sg;
2608		buf = sg_miter->addr;
2609		remain = sg_miter->length;
2610		offset = 0;
2611
2612		do {
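			/*
			 * Readable bytes: FIFO word count scaled to
			 * bytes, plus any staged partial word.
			 */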
2613			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2614					<< shift) + host->part_buf_count;
2615			len = min(remain, fcnt);
2616			if (!len)
2617				break;
2618			dw_mci_pull_data(host, (void *)(buf + offset), len);
2619			data->bytes_xfered += len;
2620			offset += len;
2621			remain -= len;
2622		} while (remain);
2623
2624		sg_miter->consumed = offset;
2625		status = mci_readl(host, MINTSTS);
2626		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2627	/* if RXDR is ready, read again */
2628	} while ((status & SDMMC_INT_RXDR) ||
2629		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2630
2631	if (!remain) {
2632		if (!sg_miter_next(sg_miter))
2633			goto done;
2634		sg_miter->consumed = 0;
2635	}
2636	sg_miter_stop(sg_miter);
2637	return;
2638
2639done:
2640	sg_miter_stop(sg_miter);
2641	host->sg = NULL;
2642	smp_wmb(); /* drain writebuffer */
2643	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2644}
2645
2646static void dw_mci_write_data_pio(struct dw_mci *host)
2647{
2648	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2649	void *buf;
2650	unsigned int offset;
2651	struct mmc_data	*data = host->data;
2652	int shift = host->data_shift;
2653	u32 status;
2654	unsigned int len;
2655	unsigned int fifo_depth = host->fifo_depth;
2656	unsigned int remain, fcnt;
2657
2658	do {
2659		if (!sg_miter_next(sg_miter))
2660			goto done;
2661
2662		host->sg = sg_miter->piter.sg;
2663		buf = sg_miter->addr;
2664		remain = sg_miter->length;
2665		offset = 0;
2666
2667		do {
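			/*
			 * Writable bytes: free FIFO words scaled to bytes,
			 * minus bytes already staged in part_buf.
			 */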
2668			fcnt = ((fifo_depth -
2669				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2670					<< shift) - host->part_buf_count;
2671			len = min(remain, fcnt);
2672			if (!len)
2673				break;
2674			host->push_data(host, (void *)(buf + offset), len);
2675			data->bytes_xfered += len;
2676			offset += len;
2677			remain -= len;
2678		} while (remain);
2679
2680		sg_miter->consumed = offset;
2681		status = mci_readl(host, MINTSTS);
2682		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2683	} while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
2684
2685	if (!remain) {
2686		if (!sg_miter_next(sg_miter))
2687			goto done;
2688		sg_miter->consumed = 0;
2689	}
2690	sg_miter_stop(sg_miter);
2691	return;
2692
2693done:
2694	sg_miter_stop(sg_miter);
2695	host->sg = NULL;
2696	smp_wmb(); /* drain writebuffer */
2697	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2698}
2699
2700static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2701{
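	/*
	 * The command finished one way or another, so the command-timeout
	 * watchdog is no longer needed.
	 */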
2702	del_timer(&host->cto_timer);
2703
2704	if (!host->cmd_status)
2705		host->cmd_status = status;
2706
2707	smp_wmb(); /* drain writebuffer */
2708
2709	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2710	tasklet_schedule(&host->tasklet);
2711
2712	dw_mci_start_fault_timer(host);
2713}
2714
2715static void dw_mci_handle_cd(struct dw_mci *host)
2716{
2717	struct dw_mci_slot *slot = host->slot;
2718
2719	mmc_detect_change(slot->mmc,
2720		msecs_to_jiffies(host->pdata->detect_delay_ms));
2721}
2722
2723static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2724{
2725	struct dw_mci *host = dev_id;
2726	u32 pending;
2727	struct dw_mci_slot *slot = host->slot;
2728
2729	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2730
2731	if (pending) {
2732		/* Check volt switch first, since it can look like an error */
2733		if ((host->state == STATE_SENDING_CMD11) &&
2734		    (pending & SDMMC_INT_VOLT_SWITCH)) {
2735			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2736			pending &= ~SDMMC_INT_VOLT_SWITCH;
2737
2738			/*
2739			 * Hold the lock; we know cmd11_timer can't be kicked
2740			 * off after the lock is released, so safe to delete.
2741			 */
2742			spin_lock(&host->irq_lock);
2743			dw_mci_cmd_interrupt(host, pending);
2744			spin_unlock(&host->irq_lock);
2745
2746			del_timer(&host->cmd11_timer);
2747		}
2748
2749		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2750			spin_lock(&host->irq_lock);
2751
2752			del_timer(&host->cto_timer);
2753			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2754			host->cmd_status = pending;
2755			smp_wmb(); /* drain writebuffer */
2756			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2757
2758			spin_unlock(&host->irq_lock);
2759		}
2760
2761		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2762			spin_lock(&host->irq_lock);
2763
2764			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
2765				del_timer(&host->dto_timer);
2766
2767			/* if there is an error, report DATA_ERROR */
2768			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2769			host->data_status = pending;
2770			smp_wmb(); /* drain writebuffer */
2771			set_bit(EVENT_DATA_ERROR, &host->pending_events);
2772
2773			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
2774				/* In case of error, we cannot expect a DTO */
2775				set_bit(EVENT_DATA_COMPLETE,
2776					&host->pending_events);
2777
2778			tasklet_schedule(&host->tasklet);
2779
2780			spin_unlock(&host->irq_lock);
2781		}
2782
2783		if (pending & SDMMC_INT_DATA_OVER) {
2784			spin_lock(&host->irq_lock);
2785
2786			del_timer(&host->dto_timer);
2787
2788			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2789			if (!host->data_status)
2790				host->data_status = pending;
2791			smp_wmb(); /* drain writebuffer */
2792			if (host->dir_status == DW_MCI_RECV_STATUS) {
2793				if (host->sg != NULL)
2794					dw_mci_read_data_pio(host, true);
2795			}
2796			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2797			tasklet_schedule(&host->tasklet);
2798
2799			spin_unlock(&host->irq_lock);
2800		}
2801
2802		if (pending & SDMMC_INT_RXDR) {
2803			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2804			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2805				dw_mci_read_data_pio(host, false);
2806		}
2807
2808		if (pending & SDMMC_INT_TXDR) {
2809			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2810			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2811				dw_mci_write_data_pio(host);
2812		}
2813
2814		if (pending & SDMMC_INT_CMD_DONE) {
2815			spin_lock(&host->irq_lock);
2816
2817			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2818			dw_mci_cmd_interrupt(host, pending);
2819
2820			spin_unlock(&host->irq_lock);
2821		}
2822
2823		if (pending & SDMMC_INT_CD) {
2824			mci_writel(host, RINTSTS, SDMMC_INT_CD);
2825			dw_mci_handle_cd(host);
2826		}
2827
2828		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2829			mci_writel(host, RINTSTS,
2830				   SDMMC_INT_SDIO(slot->sdio_id));
2831			__dw_mci_enable_sdio_irq(slot, 0);
2832			sdio_signal_irq(slot->mmc);
2833		}
2834
2835	}
2836
2837	if (host->use_dma != TRANS_MODE_IDMAC)
2838		return IRQ_HANDLED;
2839
2840	/* Handle IDMA interrupts */
2841	if (host->dma_64bit_address == 1) {
2842		pending = mci_readl(host, IDSTS64);
2843		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
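			/*
			 * Acknowledge the transfer-done bits; IDSTS is
			 * write-one-to-clear.
			 */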
2844			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2845							SDMMC_IDMAC_INT_RI);
2846			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2847			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2848				host->dma_ops->complete((void *)host);
2849		}
2850	} else {
2851		pending = mci_readl(host, IDSTS);
2852		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2853			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2854							SDMMC_IDMAC_INT_RI);
2855			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2856			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2857				host->dma_ops->complete((void *)host);
2858		}
2859	}
2860
2861	return IRQ_HANDLED;
2862}
2863
2864static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2865{
2866	struct dw_mci *host = slot->host;
2867	const struct dw_mci_drv_data *drv_data = host->drv_data;
2868	struct mmc_host *mmc = slot->mmc;
2869	int ctrl_id;
2870
2871	if (host->pdata->caps)
2872		mmc->caps = host->pdata->caps;
2873
2874	if (host->pdata->pm_caps)
2875		mmc->pm_caps = host->pdata->pm_caps;
2876
2877	if (drv_data)
2878		mmc->caps |= drv_data->common_caps;
2879
2880	if (host->dev->of_node) {
2881		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2882		if (ctrl_id < 0)
2883			ctrl_id = 0;
2884	} else {
2885		ctrl_id = to_platform_device(host->dev)->id;
2886	}
2887
2888	if (drv_data && drv_data->caps) {
2889		if (ctrl_id >= drv_data->num_caps) {
2890			dev_err(host->dev, "invalid controller id %d\n",
2891				ctrl_id);
2892			return -EINVAL;
2893		}
2894		mmc->caps |= drv_data->caps[ctrl_id];
2895	}
2896
2897	if (host->pdata->caps2)
2898		mmc->caps2 = host->pdata->caps2;
2899
2900	/* if host has set a minimum_freq, we should respect it */
2901	if (host->minimum_speed)
2902		mmc->f_min = host->minimum_speed;
2903	else
2904		mmc->f_min = DW_MCI_FREQ_MIN;
2905
2906	if (!mmc->f_max)
2907		mmc->f_max = DW_MCI_FREQ_MAX;
2908
2909	/* Process SDIO IRQs through the sdio_irq_work. */
2910	if (mmc->caps & MMC_CAP_SDIO_IRQ)
2911		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2912
2913	return 0;
2914}
2915
2916static int dw_mci_init_slot(struct dw_mci *host)
2917{
2918	struct mmc_host *mmc;
2919	struct dw_mci_slot *slot;
2920	int ret;
2921
2922	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2923	if (!mmc)
2924		return -ENOMEM;
2925
2926	slot = mmc_priv(mmc);
2927	slot->id = 0;
2928	slot->sdio_id = host->sdio_id0 + slot->id;
2929	slot->mmc = mmc;
2930	slot->host = host;
2931	host->slot = slot;
2932
2933	mmc->ops = &dw_mci_ops;
2934
2935	/* if there are external regulators, get them */
2936	ret = mmc_regulator_get_supply(mmc);
2937	if (ret)
2938		goto err_host_allocated;
2939
2940	if (!mmc->ocr_avail)
2941		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2942
2943	ret = mmc_of_parse(mmc);
2944	if (ret)
2945		goto err_host_allocated;
2946
2947	ret = dw_mci_init_slot_caps(slot);
2948	if (ret)
2949		goto err_host_allocated;
2950
2951	/* Useful defaults if platform data is unset. */
2952	if (host->use_dma == TRANS_MODE_IDMAC) {
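		/*
		 * Each IDMAC descriptor covers one segment, so the
		 * descriptor ring size caps max_segs.
		 */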
2953		mmc->max_segs = host->ring_size;
2954		mmc->max_blk_size = 65535;
2955		mmc->max_seg_size = 0x1000;
2956		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2957		mmc->max_blk_count = mmc->max_req_size / 512;
2958	} else if (host->use_dma == TRANS_MODE_EDMAC) {
2959		mmc->max_segs = 64;
2960		mmc->max_blk_size = 65535;
2961		mmc->max_blk_count = 65535;
2962		mmc->max_req_size =
2963				mmc->max_blk_size * mmc->max_blk_count;
2964		mmc->max_seg_size = mmc->max_req_size;
2965	} else {
2966		/* TRANS_MODE_PIO */
2967		mmc->max_segs = 64;
2968		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2969		mmc->max_blk_count = 512;
2970		mmc->max_req_size = mmc->max_blk_size *
2971				    mmc->max_blk_count;
2972		mmc->max_seg_size = mmc->max_req_size;
2973	}
2974
2975	dw_mci_get_cd(mmc);
2976
2977	ret = mmc_add_host(mmc);
2978	if (ret)
2979		goto err_host_allocated;
2980
2981#if defined(CONFIG_DEBUG_FS)
2982	dw_mci_init_debugfs(slot);
2983#endif
2984
2985	return 0;
2986
2987err_host_allocated:
2988	mmc_free_host(mmc);
2989	return ret;
2990}
2991
2992static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
2993{
2994	/* Debugfs stuff is cleaned up by mmc core */
2995	mmc_remove_host(slot->mmc);
2996	slot->host->slot = NULL;
2997	mmc_free_host(slot->mmc);
2998}
2999
3000static void dw_mci_init_dma(struct dw_mci *host)
3001{
3002	int addr_config;
3003	struct device *dev = host->dev;
3004
3005	/*
3006	* Check transfer mode from HCON[17:16]
3007	* To clarify the ambiguous description in the dw_mmc databook:
3008	* 2b'00: No DMA Interface -> Actually means using Internal DMA block
3009	* 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
3010	* 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
3011	* 2b'11: Non DW DMA Interface -> pio only
3012	* Compared to DesignWare DMA Interface, Generic DMA Interface has a
3013	* simpler request/acknowledge handshake mechanism and both of them
3014	* are regarded as external DMA masters by dw_mmc.
3015	*/
3016	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
3017	if (host->use_dma == DMA_INTERFACE_IDMA) {
3018		host->use_dma = TRANS_MODE_IDMAC;
3019	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
3020		   host->use_dma == DMA_INTERFACE_GDMA) {
3021		host->use_dma = TRANS_MODE_EDMAC;
3022	} else {
3023		goto no_dma;
3024	}
3025
3026	/* Determine which DMA interface to use */
3027	if (host->use_dma == TRANS_MODE_IDMAC) {
3028		/*
3029		* Check ADDR_CONFIG bit in HCON to find
3030		* IDMAC address bus width
3031		*/
3032		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
3033
3034		if (addr_config == 1) {
3035			/* host supports IDMAC in 64-bit address mode */
3036			host->dma_64bit_address = 1;
3037			dev_info(host->dev,
3038				 "IDMAC supports 64-bit address mode.\n");
3039			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
3040				dma_set_coherent_mask(host->dev,
3041						      DMA_BIT_MASK(64));
3042		} else {
3043			/* host supports IDMAC in 32-bit address mode */
3044			host->dma_64bit_address = 0;
3045			dev_info(host->dev,
3046				 "IDMAC supports 32-bit address mode.\n");
3047		}
3048
3049		/* Alloc memory for sg translation */
3050		host->sg_cpu = dmam_alloc_coherent(host->dev,
3051						   DESC_RING_BUF_SZ,
3052						   &host->sg_dma, GFP_KERNEL);
3053		if (!host->sg_cpu) {
3054			dev_err(host->dev,
3055				"%s: could not alloc DMA memory\n",
3056				__func__);
3057			goto no_dma;
3058		}
3059
3060		host->dma_ops = &dw_mci_idmac_ops;
3061		dev_info(host->dev, "Using internal DMA controller.\n");
3062	} else {
3063		/* TRANS_MODE_EDMAC: check dma bindings again */
3064		if ((device_property_string_array_count(dev, "dma-names") < 0) ||
3065		    !device_property_present(dev, "dmas")) {
3066			goto no_dma;
3067		}
3068		host->dma_ops = &dw_mci_edmac_ops;
3069		dev_info(host->dev, "Using external DMA controller.\n");
3070	}
3071
3072	if (host->dma_ops->init && host->dma_ops->start &&
3073	    host->dma_ops->stop && host->dma_ops->cleanup) {
3074		if (host->dma_ops->init(host)) {
3075			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
3076				__func__);
3077			goto no_dma;
3078		}
3079	} else {
3080		dev_err(host->dev, "DMA initialization not found.\n");
3081		goto no_dma;
3082	}
3083
3084	return;
3085
3086no_dma:
3087	dev_info(host->dev, "Using PIO mode.\n");
3088	host->use_dma = TRANS_MODE_PIO;
3089}
3090
3091static void dw_mci_cmd11_timer(struct timer_list *t)
3092{
3093	struct dw_mci *host = from_timer(host, t, cmd11_timer);
3094
3095	if (host->state != STATE_SENDING_CMD11) {
3096		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
3097		return;
3098	}
3099
3100	host->cmd_status = SDMMC_INT_RTO;
3101	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3102	tasklet_schedule(&host->tasklet);
3103}
3104
3105static void dw_mci_cto_timer(struct timer_list *t)
3106{
3107	struct dw_mci *host = from_timer(host, t, cto_timer);
3108	unsigned long irqflags;
3109	u32 pending;
3110
3111	spin_lock_irqsave(&host->irq_lock, irqflags);
3112
3113	/*
3114	 * If somehow we have very bad interrupt latency it's remotely possible
3115	 * that the timer could fire while the interrupt is still pending or
3116	 * while the interrupt is midway through running.  Let's be paranoid
3117	 * and detect those two cases.  Note that this paranoia is somewhat
3118	 * justified because in this function we don't actually cancel the
3119	 * pending command in the controller--we just assume it will never come.
3120	 */
3121	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3122	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
3123		/* The interrupt should fire; no need to act but we can warn */
3124		dev_warn(host->dev, "Unexpected interrupt latency\n");
3125		goto exit;
3126	}
3127	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3128		/* Presumably interrupt handler couldn't delete the timer */
3129		dev_warn(host->dev, "CTO timeout when already completed\n");
3130		goto exit;
3131	}
3132
3133	/*
3134	 * Continued paranoia to make sure we're in the state we expect.
3135	 * This paranoia isn't really justified but it seems good to be safe.
3136	 */
3137	switch (host->state) {
3138	case STATE_SENDING_CMD11:
3139	case STATE_SENDING_CMD:
3140	case STATE_SENDING_STOP:
3141		/*
3142		 * If CMD_DONE interrupt does NOT come in sending command
3143		 * state, we should notify the driver to terminate current
3144		 * transfer and report a command timeout to the core.
3145		 */
3146		host->cmd_status = SDMMC_INT_RTO;
3147		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3148		tasklet_schedule(&host->tasklet);
3149		break;
3150	default:
3151		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3152			 host->state);
3153		break;
3154	}
3155
3156exit:
3157	spin_unlock_irqrestore(&host->irq_lock, irqflags);
3158}
3159
3160static void dw_mci_dto_timer(struct timer_list *t)
3161{
3162	struct dw_mci *host = from_timer(host, t, dto_timer);
3163	unsigned long irqflags;
3164	u32 pending;
3165
3166	spin_lock_irqsave(&host->irq_lock, irqflags);
3167
3168	/*
3169	 * The DTO timer is much longer than the CTO timer, so it's even less
3170	 * likely that we'll hit these cases, but it pays to be paranoid.
3171	 */
3172	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3173	if (pending & SDMMC_INT_DATA_OVER) {
3174		/* The interrupt should fire; no need to act but we can warn */
3175		dev_warn(host->dev, "Unexpected data interrupt latency\n");
3176		goto exit;
3177	}
3178	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3179		/* Presumably interrupt handler couldn't delete the timer */
3180		dev_warn(host->dev, "DTO timeout when already completed\n");
3181		goto exit;
3182	}
3183
3184	/*
3185	 * Continued paranoia to make sure we're in the state we expect.
3186	 * This paranoia isn't really justified but it seems good to be safe.
3187	 */
3188	switch (host->state) {
3189	case STATE_SENDING_DATA:
3190	case STATE_DATA_BUSY:
3191		/*
3192		 * If DTO interrupt does NOT come in sending data state,
3193		 * we should notify the driver to terminate current transfer
3194		 * and report a data timeout to the core.
3195		 */
3196		host->data_status = SDMMC_INT_DRTO;
3197		set_bit(EVENT_DATA_ERROR, &host->pending_events);
3198		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3199		tasklet_schedule(&host->tasklet);
3200		break;
3201	default:
3202		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3203			 host->state);
3204		break;
3205	}
3206
3207exit:
3208	spin_unlock_irqrestore(&host->irq_lock, irqflags);
3209}
3210
3211#ifdef CONFIG_OF
3212static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3213{
3214	struct dw_mci_board *pdata;
3215	struct device *dev = host->dev;
3216	const struct dw_mci_drv_data *drv_data = host->drv_data;
3217	int ret;
3218	u32 clock_frequency;
3219
3220	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3221	if (!pdata)
3222		return ERR_PTR(-ENOMEM);
3223
3224	/* find the reset controller if one exists */
3225	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3226	if (IS_ERR(pdata->rstc))
3227		return ERR_CAST(pdata->rstc);
3228
3229	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
3230		dev_info(dev,
3231			 "fifo-depth property not found, using value of FIFOTH register as default\n");
3232
3233	device_property_read_u32(dev, "card-detect-delay",
3234				 &pdata->detect_delay_ms);
3235
3236	device_property_read_u32(dev, "data-addr", &host->data_addr_override);
3237
3238	if (device_property_present(dev, "fifo-watermark-aligned"))
3239		host->wm_aligned = true;
3240
3241	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3242		pdata->bus_hz = clock_frequency;
3243
3244	if (drv_data && drv_data->parse_dt) {
3245		ret = drv_data->parse_dt(host);
3246		if (ret)
3247			return ERR_PTR(ret);
3248	}
3249
3250	return pdata;
3251}
3252
3253#else /* CONFIG_OF */
3254static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3255{
3256	return ERR_PTR(-EINVAL);
3257}
3258#endif /* CONFIG_OF */
3259
3260static void dw_mci_enable_cd(struct dw_mci *host)
3261{
3262	unsigned long irqflags;
3263	u32 temp;
3264
3265	/*
3266	 * No need for the CD interrupt if the slot has a usable
3267	 * CD GPIO, or if broken card detection forces polling.
3268	 */
3269	if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
3270		return;
3271
3272	if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3273		spin_lock_irqsave(&host->irq_lock, irqflags);
3274		temp = mci_readl(host, INTMASK);
3275		temp  |= SDMMC_INT_CD;
3276		mci_writel(host, INTMASK, temp);
3277		spin_unlock_irqrestore(&host->irq_lock, irqflags);
3278	}
3279}
3280
3281int dw_mci_probe(struct dw_mci *host)
3282{
3283	const struct dw_mci_drv_data *drv_data = host->drv_data;
3284	int width, i, ret = 0;
3285	u32 fifo_size;
3286
3287	if (!host->pdata) {
3288		host->pdata = dw_mci_parse_dt(host);
3289		if (IS_ERR(host->pdata))
3290			return dev_err_probe(host->dev, PTR_ERR(host->pdata),
3291					     "platform data not available\n");
3292	}
3293
3294	host->biu_clk = devm_clk_get(host->dev, "biu");
3295	if (IS_ERR(host->biu_clk)) {
3296		dev_dbg(host->dev, "biu clock not available\n");
3297	} else {
3298		ret = clk_prepare_enable(host->biu_clk);
3299		if (ret) {
3300			dev_err(host->dev, "failed to enable biu clock\n");
3301			return ret;
3302		}
3303	}
3304
3305	host->ciu_clk = devm_clk_get(host->dev, "ciu");
3306	if (IS_ERR(host->ciu_clk)) {
3307		dev_dbg(host->dev, "ciu clock not available\n");
3308		host->bus_hz = host->pdata->bus_hz;
3309	} else {
3310		ret = clk_prepare_enable(host->ciu_clk);
3311		if (ret) {
3312			dev_err(host->dev, "failed to enable ciu clock\n");
3313			goto err_clk_biu;
3314		}
3315
3316		if (host->pdata->bus_hz) {
3317			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3318			if (ret)
3319				dev_warn(host->dev,
3320					 "Unable to set bus rate to %uHz\n",
3321					 host->pdata->bus_hz);
3322		}
3323		host->bus_hz = clk_get_rate(host->ciu_clk);
3324	}
3325
3326	if (!host->bus_hz) {
3327		dev_err(host->dev,
3328			"Platform data must supply bus speed\n");
3329		ret = -ENODEV;
3330		goto err_clk_ciu;
3331	}
3332
3333	if (host->pdata->rstc) {
3334		reset_control_assert(host->pdata->rstc);
3335		usleep_range(10, 50);
3336		reset_control_deassert(host->pdata->rstc);
3337	}
3338
3339	if (drv_data && drv_data->init) {
3340		ret = drv_data->init(host);
3341		if (ret) {
3342			dev_err(host->dev,
3343				"implementation specific init failed\n");
3344			goto err_clk_ciu;
3345		}
3346	}
3347
3348	timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3349	timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3350	timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
3351
3352	spin_lock_init(&host->lock);
3353	spin_lock_init(&host->irq_lock);
3354	INIT_LIST_HEAD(&host->queue);
3355
3356	dw_mci_init_fault(host);
3357
3358	/*
3359	 * Get the host data width - this assumes that HCON has been set with
3360	 * the correct values.
3361	 */
3362	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
3363	if (!i) {
3364		host->push_data = dw_mci_push_data16;
3365		host->pull_data = dw_mci_pull_data16;
3366		width = 16;
3367		host->data_shift = 1;
3368	} else if (i == 2) {
3369		host->push_data = dw_mci_push_data64;
3370		host->pull_data = dw_mci_pull_data64;
3371		width = 64;
3372		host->data_shift = 3;
3373	} else {
3374		/* Check for a reserved value, and warn if it is */
3375		WARN((i != 1),
3376		     "HCON reports a reserved host data width!\n"
3377		     "Defaulting to 32-bit access.\n");
3378		host->push_data = dw_mci_push_data32;
3379		host->pull_data = dw_mci_pull_data32;
3380		width = 32;
3381		host->data_shift = 2;
3382	}
3383
3384	/* Reset all blocks */
3385	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3386		ret = -ENODEV;
3387		goto err_clk_ciu;
3388	}
3389
3390	host->dma_ops = host->pdata->dma_ops;
3391	dw_mci_init_dma(host);
3392
3393	/* Clear the interrupts for the host controller */
3394	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3395	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3396
3397	/* Put in max timeout */
3398	mci_writel(host, TMOUT, 0xFFFFFFFF);
3399
3400	/*
3401	 * FIFO threshold settings: RX Mark = fifo_size / 2 - 1,
3402	 *                          TX Mark = fifo_size / 2, DMA Size = 8
3403	 */
3404	if (!host->pdata->fifo_depth) {
3405		/*
3406		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3407		 * have been overwritten by the bootloader, just like we're
3408		 * about to do, so if you know the value for your hardware, you
3409		 * should put it in the platform data.
3410		 */
3411		fifo_size = mci_readl(host, FIFOTH);
3412		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3413	} else {
3414		fifo_size = host->pdata->fifo_depth;
3415	}
3416	host->fifo_depth = fifo_size;
3417	host->fifoth_val =
3418		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3419	mci_writel(host, FIFOTH, host->fifoth_val);
3420
3421	/* disable clock to CIU */
3422	mci_writel(host, CLKENA, 0);
3423	mci_writel(host, CLKSRC, 0);
3424
3425	/*
3426	 * In 2.40a spec, Data offset is changed.
3427	 * Need to check the version-id and set data-offset for DATA register.
3428	 */
3429	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3430	dev_info(host->dev, "Version ID is %04x\n", host->verid);
3431
3432	if (host->data_addr_override)
3433		host->fifo_reg = host->regs + host->data_addr_override;
3434	else if (host->verid < DW_MMC_240A)
3435		host->fifo_reg = host->regs + DATA_OFFSET;
3436	else
3437		host->fifo_reg = host->regs + DATA_240A_OFFSET;
3438
3439	tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
3440	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3441			       host->irq_flags, "dw-mci", host);
3442	if (ret)
3443		goto err_dmaunmap;
3444
3445	/*
3446	 * Enable interrupts for command done, data over, data empty,
3447	 * receive ready and error such as transmit, receive timeout, crc error
3448	 */
3449	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3450		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3451		   DW_MCI_ERROR_FLAGS);
3452	/* Enable mci interrupt */
3453	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3454
3455	dev_info(host->dev,
3456		 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
3457		 host->irq, width, fifo_size);
3458
3459	/* We need at least one slot to succeed */
3460	ret = dw_mci_init_slot(host);
3461	if (ret) {
3462		dev_dbg(host->dev, "slot %d init failed\n", i);
3463		goto err_dmaunmap;
3464	}
3465
3466	/* Now that slots are all setup, we can enable card detect */
3467	dw_mci_enable_cd(host);
3468
3469	return 0;
3470
3471err_dmaunmap:
3472	if (host->use_dma && host->dma_ops->exit)
3473		host->dma_ops->exit(host);
3474
3475	reset_control_assert(host->pdata->rstc);
3476
3477err_clk_ciu:
3478	clk_disable_unprepare(host->ciu_clk);
3479
3480err_clk_biu:
3481	clk_disable_unprepare(host->biu_clk);
3482
3483	return ret;
3484}
3485EXPORT_SYMBOL(dw_mci_probe);
3486
3487void dw_mci_remove(struct dw_mci *host)
3488{
3489	dev_dbg(host->dev, "remove slot\n");
3490	if (host->slot)
3491		dw_mci_cleanup_slot(host->slot);
3492
3493	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3494	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3495
3496	/* disable clock to CIU */
3497	mci_writel(host, CLKENA, 0);
3498	mci_writel(host, CLKSRC, 0);
3499
3500	if (host->use_dma && host->dma_ops->exit)
3501		host->dma_ops->exit(host);
3502
3503	reset_control_assert(host->pdata->rstc);
3504
3505	clk_disable_unprepare(host->ciu_clk);
3506	clk_disable_unprepare(host->biu_clk);
3507}
3508EXPORT_SYMBOL(dw_mci_remove);
3509
3510
3511
3512#ifdef CONFIG_PM
3513int dw_mci_runtime_suspend(struct device *dev)
3514{
3515	struct dw_mci *host = dev_get_drvdata(dev);
3516
3517	if (host->use_dma && host->dma_ops->exit)
3518		host->dma_ops->exit(host);
3519
3520	clk_disable_unprepare(host->ciu_clk);
3521
3522	if (host->slot &&
3523	    (mmc_can_gpio_cd(host->slot->mmc) ||
3524	     !mmc_card_is_removable(host->slot->mmc)))
3525		clk_disable_unprepare(host->biu_clk);
3526
3527	return 0;
3528}
3529EXPORT_SYMBOL(dw_mci_runtime_suspend);
3530
3531int dw_mci_runtime_resume(struct device *dev)
3532{
3533	int ret = 0;
3534	struct dw_mci *host = dev_get_drvdata(dev);
3535
3536	if (host->slot &&
3537	    (mmc_can_gpio_cd(host->slot->mmc) ||
3538	     !mmc_card_is_removable(host->slot->mmc))) {
3539		ret = clk_prepare_enable(host->biu_clk);
3540		if (ret)
3541			return ret;
3542	}
3543
3544	ret = clk_prepare_enable(host->ciu_clk);
3545	if (ret)
3546		goto err;
3547
3548	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3549		clk_disable_unprepare(host->ciu_clk);
3550		ret = -ENODEV;
3551		goto err;
3552	}
3553
3554	if (host->use_dma && host->dma_ops->init)
3555		host->dma_ops->init(host);
3556
3557	/*
3558	 * Restore the initial value of the FIFOTH register
3559	 * and invalidate prev_blksz with zero.
3560	 */
3561	mci_writel(host, FIFOTH, host->fifoth_val);
3562	host->prev_blksz = 0;
3563
3564	/* Put in max timeout */
3565	mci_writel(host, TMOUT, 0xFFFFFFFF);
3566
3567	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3568	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3569		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3570		   DW_MCI_ERROR_FLAGS);
3571	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3572
3573
3574	if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3575		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3576
3577	/* Force setup bus to guarantee available clock output */
3578	dw_mci_setup_bus(host->slot, true);
3579
3580	/* Re-enable SDIO interrupts. */
3581	if (sdio_irq_claimed(host->slot->mmc))
3582		__dw_mci_enable_sdio_irq(host->slot, 1);
3583
3584	/* Now that slots are all setup, we can enable card detect */
3585	dw_mci_enable_cd(host);
3586
3587	return 0;
3588
3589err:
3590	if (host->slot &&
3591	    (mmc_can_gpio_cd(host->slot->mmc) ||
3592	     !mmc_card_is_removable(host->slot->mmc)))
3593		clk_disable_unprepare(host->biu_clk);
3594
3595	return ret;
3596}
3597EXPORT_SYMBOL(dw_mci_runtime_resume);
3598#endif /* CONFIG_PM */
3599
3600static int __init dw_mci_init(void)
3601{
3602	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3603	return 0;
3604}
3605
3606static void __exit dw_mci_exit(void)
3607{
3608}
3609
3610module_init(dw_mci_init);
3611module_exit(dw_mci_exit);
3612
3613MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3614MODULE_AUTHOR("NXP Semiconductor VietNam");
3615MODULE_AUTHOR("Imagination Technologies Ltd");
3616MODULE_LICENSE("GPL v2");
v3.15
 
   1/*
   2 * Synopsys DesignWare Multimedia Card Interface driver
   3 *  (Based on NXP driver for lpc 31xx)
   4 *
   5 * Copyright (C) 2009 NXP Semiconductors
   6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 */
  13
  14#include <linux/blkdev.h>
  15#include <linux/clk.h>
  16#include <linux/debugfs.h>
  17#include <linux/device.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/err.h>
  20#include <linux/init.h>
  21#include <linux/interrupt.h>
 
  22#include <linux/ioport.h>
 
  23#include <linux/module.h>
  24#include <linux/platform_device.h>
 
 
  25#include <linux/seq_file.h>
  26#include <linux/slab.h>
  27#include <linux/stat.h>
  28#include <linux/delay.h>
  29#include <linux/irq.h>
 
  30#include <linux/mmc/host.h>
  31#include <linux/mmc/mmc.h>
 
  32#include <linux/mmc/sdio.h>
  33#include <linux/mmc/dw_mmc.h>
  34#include <linux/bitops.h>
  35#include <linux/regulator/consumer.h>
  36#include <linux/workqueue.h>
  37#include <linux/of.h>
  38#include <linux/of_gpio.h>
  39#include <linux/mmc/slot-gpio.h>
  40
  41#include "dw_mmc.h"
  42
  43/* Common flag combinations */
  44#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
  45				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
  46				 SDMMC_INT_EBE)
  47#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
  48				 SDMMC_INT_RESP_ERR)
  49#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
  50				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
  51#define DW_MCI_SEND_STATUS	1
  52#define DW_MCI_RECV_STATUS	2
  53#define DW_MCI_DMA_THRESHOLD	16
  54
  55#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
  56#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
  57
  58#ifdef CONFIG_MMC_DW_IDMAC
  59#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
  60				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
  61				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
  62				 SDMMC_IDMAC_INT_TI)
  63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  64struct idmac_desc {
  65	u32		des0;	/* Control Descriptor */
  66#define IDMAC_DES0_DIC	BIT(1)
  67#define IDMAC_DES0_LD	BIT(2)
  68#define IDMAC_DES0_FD	BIT(3)
  69#define IDMAC_DES0_CH	BIT(4)
  70#define IDMAC_DES0_ER	BIT(5)
  71#define IDMAC_DES0_CES	BIT(30)
  72#define IDMAC_DES0_OWN	BIT(31)
  73
  74	u32		des1;	/* Buffer sizes */
  75#define IDMAC_SET_BUFFER1_SIZE(d, s) \
  76	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
  77
  78	u32		des2;	/* buffer 1 physical address */
  79
  80	u32		des3;	/* buffer 2 physical address */
  81};
  82#endif /* CONFIG_MMC_DW_IDMAC */
  83
  84static const u8 tuning_blk_pattern_4bit[] = {
  85	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
  86	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
  87	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
  88	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
  89	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
  90	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
  91	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
  92	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
  93};
  94
  95static const u8 tuning_blk_pattern_8bit[] = {
  96	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
  97	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
  98	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
  99	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
 100	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
 101	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
 102	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
 103	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
 104	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
 105	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
 106	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
 107	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
 108	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
 109	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
 110	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
 111	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
 112};
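/*
 * The two blocks above are the fixed tuning patterns defined by the
 * SD/eMMC specifications: CMD19 (SD UHS-I) uses the 4-bit pattern,
 * while CMD21 (eMMC HS200) uses the 4-bit or 8-bit pattern depending
 * on bus width.  dw_mci_execute_tuning() below hands the matching
 * pattern to the platform tuning hook, which compares each received
 * block against these bytes while sweeping sample phases.
 */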
 113
 114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
 115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
 116
 117#if defined(CONFIG_DEBUG_FS)
 118static int dw_mci_req_show(struct seq_file *s, void *v)
 119{
 120	struct dw_mci_slot *slot = s->private;
 121	struct mmc_request *mrq;
 122	struct mmc_command *cmd;
 123	struct mmc_command *stop;
 124	struct mmc_data	*data;
 125
 126	/* Make sure we get a consistent snapshot */
 127	spin_lock_bh(&slot->host->lock);
 128	mrq = slot->mrq;
 129
 130	if (mrq) {
 131		cmd = mrq->cmd;
 132		data = mrq->data;
 133		stop = mrq->stop;
 134
 135		if (cmd)
 136			seq_printf(s,
 137				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 138				   cmd->opcode, cmd->arg, cmd->flags,
 139				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
  140				   cmd->resp[3], cmd->error);
 141		if (data)
 142			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
 143				   data->bytes_xfered, data->blocks,
 144				   data->blksz, data->flags, data->error);
 145		if (stop)
 146			seq_printf(s,
 147				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 148				   stop->opcode, stop->arg, stop->flags,
 149				   stop->resp[0], stop->resp[1], stop->resp[2],
  150				   stop->resp[3], stop->error);
 151	}
 152
 153	spin_unlock_bh(&slot->host->lock);
 154
 155	return 0;
 156}
 157
 158static int dw_mci_req_open(struct inode *inode, struct file *file)
 159{
 160	return single_open(file, dw_mci_req_show, inode->i_private);
 161}
 162
 163static const struct file_operations dw_mci_req_fops = {
 164	.owner		= THIS_MODULE,
 165	.open		= dw_mci_req_open,
 166	.read		= seq_read,
 167	.llseek		= seq_lseek,
 168	.release	= single_release,
 169};
 170
 171static int dw_mci_regs_show(struct seq_file *s, void *v)
 172{
 173	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
 174	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
 175	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
 176	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
 177	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
 178	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
 179
 180	return 0;
 181}
 182
 183static int dw_mci_regs_open(struct inode *inode, struct file *file)
 184{
 185	return single_open(file, dw_mci_regs_show, inode->i_private);
 186}
 187
 188static const struct file_operations dw_mci_regs_fops = {
 189	.owner		= THIS_MODULE,
 190	.open		= dw_mci_regs_open,
 191	.read		= seq_read,
 192	.llseek		= seq_lseek,
 193	.release	= single_release,
 194};
 195
 196static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
 197{
 198	struct mmc_host	*mmc = slot->mmc;
 199	struct dw_mci *host = slot->host;
 200	struct dentry *root;
 201	struct dentry *node;
 202
 203	root = mmc->debugfs_root;
 204	if (!root)
 205		return;
 206
 207	node = debugfs_create_file("regs", S_IRUSR, root, host,
 208				   &dw_mci_regs_fops);
 209	if (!node)
 210		goto err;
 211
 212	node = debugfs_create_file("req", S_IRUSR, root, slot,
 213				   &dw_mci_req_fops);
 214	if (!node)
 215		goto err;
 216
 217	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
 218	if (!node)
 219		goto err;
 220
 221	node = debugfs_create_x32("pending_events", S_IRUSR, root,
 222				  (u32 *)&host->pending_events);
 223	if (!node)
 224		goto err;
 225
 226	node = debugfs_create_x32("completed_events", S_IRUSR, root,
 227				  (u32 *)&host->completed_events);
 228	if (!node)
 229		goto err;
 230
 231	return;
 232
 233err:
 234	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
 235}
 236#endif /* defined(CONFIG_DEBUG_FS) */
 237
 238static void dw_mci_set_timeout(struct dw_mci *host)
 239{
 240	/* timeout (maximum) */
 241	mci_writel(host, TMOUT, 0xffffffff);
 242}
 243
 244static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
 245{
 246	struct mmc_data	*data;
 247	struct dw_mci_slot *slot = mmc_priv(mmc);
 248	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
 249	u32 cmdr;
 250	cmd->error = -EINPROGRESS;
 251
 252	cmdr = cmd->opcode;
 253
 254	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
 255	    cmd->opcode == MMC_GO_IDLE_STATE ||
 256	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
 257	    (cmd->opcode == SD_IO_RW_DIRECT &&
 258	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
 259		cmdr |= SDMMC_CMD_STOP;
 260	else
 261		if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
 262			cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
 263
 264	if (cmd->flags & MMC_RSP_PRESENT) {
 265		/* We expect a response, so set this bit */
 266		cmdr |= SDMMC_CMD_RESP_EXP;
 267		if (cmd->flags & MMC_RSP_136)
 268			cmdr |= SDMMC_CMD_RESP_LONG;
 269	}
 270
 271	if (cmd->flags & MMC_RSP_CRC)
 272		cmdr |= SDMMC_CMD_RESP_CRC;
 273
 274	data = cmd->data;
 275	if (data) {
 276		cmdr |= SDMMC_CMD_DAT_EXP;
 277		if (data->flags & MMC_DATA_STREAM)
 278			cmdr |= SDMMC_CMD_STRM_MODE;
 279		if (data->flags & MMC_DATA_WRITE)
 280			cmdr |= SDMMC_CMD_DAT_WR;
 281	}
 282
 283	if (drv_data && drv_data->prepare_command)
 284		drv_data->prepare_command(slot->host, &cmdr);
 285
 286	return cmdr;
 287}
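/*
 * Illustrative example of the encoding above: a single-block read
 * (CMD17, R1 response) comes out as opcode 17 plus SDMMC_CMD_RESP_EXP,
 * SDMMC_CMD_RESP_CRC and SDMMC_CMD_DAT_EXP, with SDMMC_CMD_PRV_DAT_WAIT
 * set so the CIU lets any preceding data transfer drain before driving
 * the command line.
 */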
 288
 289static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
 290{
 291	struct mmc_command *stop;
 292	u32 cmdr;
 293
 294	if (!cmd->data)
 295		return 0;
 296
 297	stop = &host->stop_abort;
 298	cmdr = cmd->opcode;
 299	memset(stop, 0, sizeof(struct mmc_command));
 300
 301	if (cmdr == MMC_READ_SINGLE_BLOCK ||
 302	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
 303	    cmdr == MMC_WRITE_BLOCK ||
 304	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
 305		stop->opcode = MMC_STOP_TRANSMISSION;
 306		stop->arg = 0;
 307		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
 308	} else if (cmdr == SD_IO_RW_EXTENDED) {
 309		stop->opcode = SD_IO_RW_DIRECT;
 310		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
 311			     ((cmd->arg >> 28) & 0x7);
 312		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
 313	} else {
 314		return 0;
 315	}
 316
 317	cmdr = stop->opcode | SDMMC_CMD_STOP |
 318		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
 319
 320	return cmdr;
 321}
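/*
 * The SD_IO_RW_DIRECT (CMD52) abort built above follows the SDIO
 * argument layout (bit 31 = write, bits 30:28 = function, bits 25:9 =
 * register address, bits 7:0 = data): it writes the function number
 * recovered from the original CMD53 argument into the CCCR ABORT
 * register (0x06) of function 0, which tells the card to abort that
 * function's data transfer.
 */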
 322
 323static void dw_mci_start_command(struct dw_mci *host,
 324				 struct mmc_command *cmd, u32 cmd_flags)
 325{
 326	host->cmd = cmd;
 327	dev_vdbg(host->dev,
 328		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
 329		 cmd->arg, cmd_flags);
 330
 331	mci_writel(host, CMDARG, cmd->arg);
 332	wmb();
 333
 334	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
 335}
 336
 337static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
 338{
 339	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
 340	dw_mci_start_command(host, stop, host->stop_cmdr);
 341}
 342
 343/* DMA interface functions */
 344static void dw_mci_stop_dma(struct dw_mci *host)
 345{
 346	if (host->using_dma) {
 347		host->dma_ops->stop(host);
 348		host->dma_ops->cleanup(host);
 349	}
 350
 351	/* Data transfer was stopped by the interrupt handler */
 352	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 353}
 354
 355static int dw_mci_get_dma_dir(struct mmc_data *data)
 356{
 357	if (data->flags & MMC_DATA_WRITE)
 358		return DMA_TO_DEVICE;
 359	else
 360		return DMA_FROM_DEVICE;
 361}
 362
 363#ifdef CONFIG_MMC_DW_IDMAC
 364static void dw_mci_dma_cleanup(struct dw_mci *host)
 365{
 366	struct mmc_data *data = host->data;
 367
 368	if (data)
 369		if (!data->host_cookie)
 370			dma_unmap_sg(host->dev,
 371				     data->sg,
 372				     data->sg_len,
 373				     dw_mci_get_dma_dir(data));
 374}
 375
 376static void dw_mci_idmac_reset(struct dw_mci *host)
 377{
 378	u32 bmod = mci_readl(host, BMOD);
 379	/* Software reset of DMA */
 380	bmod |= SDMMC_IDMAC_SWRESET;
 381	mci_writel(host, BMOD, bmod);
 382}
 383
 384static void dw_mci_idmac_stop_dma(struct dw_mci *host)
 385{
 386	u32 temp;
 387
 388	/* Disable and reset the IDMAC interface */
 389	temp = mci_readl(host, CTRL);
 390	temp &= ~SDMMC_CTRL_USE_IDMAC;
 391	temp |= SDMMC_CTRL_DMA_RESET;
 392	mci_writel(host, CTRL, temp);
 393
 394	/* Stop the IDMAC running */
 395	temp = mci_readl(host, BMOD);
 396	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
 397	temp |= SDMMC_IDMAC_SWRESET;
 398	mci_writel(host, BMOD, temp);
 399}
 400
 401static void dw_mci_idmac_complete_dma(struct dw_mci *host)
 402{
 403	struct mmc_data *data = host->data;
 404
 405	dev_vdbg(host->dev, "DMA complete\n");
 406
 407	host->dma_ops->cleanup(host);
 408
 409	/*
 410	 * If the card was removed, data will be NULL. No point in trying to
 411	 * send the stop command or waiting for NBUSY in this case.
 412	 */
 413	if (data) {
 414		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 415		tasklet_schedule(&host->tasklet);
 416	}
 417}
 418
 419static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
 420				    unsigned int sg_len)
 421{
 422	int i;
 423	struct idmac_desc *desc = host->sg_cpu;
 424
 425	for (i = 0; i < sg_len; i++, desc++) {
 426		unsigned int length = sg_dma_len(&data->sg[i]);
 427		u32 mem_addr = sg_dma_address(&data->sg[i]);
 428
 429		/* Set the OWN bit and disable interrupts for this descriptor */
 430		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
 431
 432		/* Buffer length */
 433		IDMAC_SET_BUFFER1_SIZE(desc, length);
 434
 435		/* Physical address to DMA to/from */
 436		desc->des2 = mem_addr;
 437	}
 438
 439	/* Set first descriptor */
 440	desc = host->sg_cpu;
 441	desc->des0 |= IDMAC_DES0_FD;
 442
 443	/* Set last descriptor */
 444	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
 445	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
 446	desc->des0 |= IDMAC_DES0_LD;
 447
 448	wmb();
 449}
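/*
 * Resulting layout (illustrative) for a two-entry scatterlist: both
 * descriptors carry OWN and CH (chained mode); the first also gets FD
 * and the last trades CH/DIC for LD, so the IDMAC raises its
 * transfer-done interrupt only once, at the end of the chain:
 *
 *	des0: OWN|CH|DIC|FD	des0: OWN|LD
 *	des1: len0		des1: len1
 *	des2: dma_addr0		des2: dma_addr1
 *	des3: next desc  --->	des3: (ring link)
 */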
 450
 451static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 452{
 453	u32 temp;
 454
 455	dw_mci_translate_sglist(host, host->data, sg_len);
 456
 457	/* Select IDMAC interface */
 458	temp = mci_readl(host, CTRL);
 459	temp |= SDMMC_CTRL_USE_IDMAC;
 460	mci_writel(host, CTRL, temp);
 461
 462	wmb();
 463
 464	/* Enable the IDMAC */
 465	temp = mci_readl(host, BMOD);
 466	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
 467	mci_writel(host, BMOD, temp);
 468
 469	/* Start it running */
 470	mci_writel(host, PLDMND, 1);
 471}
 472
 473static int dw_mci_idmac_init(struct dw_mci *host)
 474{
 475	struct idmac_desc *p;
 476	int i;
 477
 478	/* Number of descriptors in the ring buffer */
 479	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
 480
 481	/* Forward link the descriptor list */
 482	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
 483		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
 484
 485	/* Set the last descriptor as the end-of-ring descriptor */
 486	p->des3 = host->sg_dma;
 487	p->des0 = IDMAC_DES0_ER;
 488
 489	dw_mci_idmac_reset(host);
 490
 491	/* Mask out interrupts - get Tx & Rx complete only */
 492	mci_writel(host, IDSTS, IDMAC_INT_CLR);
 493	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
 494		   SDMMC_IDMAC_INT_TI);
 495
 496	/* Set the descriptor base address */
 497	mci_writel(host, DBADDR, host->sg_dma);
 498	return 0;
 499}
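/*
 * Ring capacity (illustrative): with 4 KiB pages each idmac_desc is
 * 16 bytes, so the ring above holds 256 descriptors.  Since
 * dw_mci_translate_sglist() consumes one descriptor per scatterlist
 * entry, a single request can span at most 256 mapped segments.
 */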
 500
 501static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
 502	.init = dw_mci_idmac_init,
 503	.start = dw_mci_idmac_start_dma,
 504	.stop = dw_mci_idmac_stop_dma,
 505	.complete = dw_mci_idmac_complete_dma,
 506	.cleanup = dw_mci_dma_cleanup,
 507};
 508#endif /* CONFIG_MMC_DW_IDMAC */
 509
 510static int dw_mci_pre_dma_transfer(struct dw_mci *host,
 511				   struct mmc_data *data,
 512				   bool next)
 513{
 514	struct scatterlist *sg;
 515	unsigned int i, sg_len;
 516
 517	if (!next && data->host_cookie)
 518		return data->host_cookie;
 519
 520	/*
 521	 * We don't do DMA on "complex" transfers, i.e. with
 522	 * non-word-aligned buffers or lengths. Also, we don't bother
 523	 * with all the DMA setup overhead for short transfers.
 524	 */
 525	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
 526		return -EINVAL;
 527
 528	if (data->blksz & 3)
 529		return -EINVAL;
 530
 531	for_each_sg(data->sg, sg, data->sg_len, i) {
 532		if (sg->offset & 3 || sg->length & 3)
 533			return -EINVAL;
 534	}
 535
 536	sg_len = dma_map_sg(host->dev,
 537			    data->sg,
 538			    data->sg_len,
 539			    dw_mci_get_dma_dir(data));
 540	if (sg_len == 0)
 541		return -EINVAL;
 542
 543	if (next)
 544		data->host_cookie = sg_len;
 545
 546	return sg_len;
 547}
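/*
 * host_cookie carries the mapped sg_len between the asynchronous
 * pre_req/post_req hooks and the actual transfer: a non-zero cookie
 * set by dw_mci_pre_req() means the buffers were already DMA-mapped
 * while the previous request was still in flight, so the check above
 * returns early and skips a second dma_map_sg().
 */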
 548
 549static void dw_mci_pre_req(struct mmc_host *mmc,
 550			   struct mmc_request *mrq,
 551			   bool is_first_req)
 552{
 553	struct dw_mci_slot *slot = mmc_priv(mmc);
 554	struct mmc_data *data = mrq->data;
 555
 556	if (!slot->host->use_dma || !data)
 557		return;
 558
 559	if (data->host_cookie) {
 560		data->host_cookie = 0;
 561		return;
 562	}
 563
 564	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
 565		data->host_cookie = 0;
 566}
 567
 568static void dw_mci_post_req(struct mmc_host *mmc,
 569			    struct mmc_request *mrq,
 570			    int err)
 571{
 572	struct dw_mci_slot *slot = mmc_priv(mmc);
 573	struct mmc_data *data = mrq->data;
 574
 575	if (!slot->host->use_dma || !data)
 576		return;
 577
 578	if (data->host_cookie)
 579		dma_unmap_sg(slot->host->dev,
 580			     data->sg,
 581			     data->sg_len,
 582			     dw_mci_get_dma_dir(data));
 583	data->host_cookie = 0;
 584}
 585
 586static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
 587{
 588#ifdef CONFIG_MMC_DW_IDMAC
 589	unsigned int blksz = data->blksz;
 590	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
 591	u32 fifo_width = 1 << host->data_shift;
 592	u32 blksz_depth = blksz / fifo_width, fifoth_val;
 593	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
 594	int idx = ARRAY_SIZE(mszs) - 1;
 595
 596	tx_wmark = (host->fifo_depth) / 2;
 597	tx_wmark_invers = host->fifo_depth - tx_wmark;
 598
 599	/*
 600	 * MSIZE is '1',
 601	 * if blksz is not a multiple of the FIFO width
 602	 */
 603	if (blksz % fifo_width) {
 604		msize = 0;
 605		rx_wmark = 1;
 606		goto done;
 607	}
 608
 609	do {
 610		if (!((blksz_depth % mszs[idx]) ||
 611		     (tx_wmark_invers % mszs[idx]))) {
 612			msize = idx;
 613			rx_wmark = mszs[idx] - 1;
 614			break;
 615		}
 616	} while (--idx > 0);
 617	/*
 618	 * If idx is '0', mszs[0] is never tried;
 619	 * thus, the initial values are used.
 620	 */
 621done:
 622	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
 623	mci_writel(host, FIFOTH, fifoth_val);
 624#endif
 625}
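/*
 * Worked example (illustrative values): fifo_depth = 32 words of
 * 4 bytes (data_shift = 2) and blksz = 512 give blksz_depth = 128 and
 * tx_wmark = tx_wmark_invers = 16.  Scanning mszs[] downwards, the
 * first burst size dividing both 128 and 16 is 16 (idx = 3), so MSIZE
 * selects 16-transfer bursts with rx_wmark = 15 and tx_wmark = 16.
 */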
 626
 627static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
 628{
 629	unsigned int blksz = data->blksz;
 630	u32 blksz_depth, fifo_depth;
 631	u16 thld_size;
 632
 633	WARN_ON(!(data->flags & MMC_DATA_READ));
 634
 635	if (host->timing != MMC_TIMING_MMC_HS200 &&
 636	    host->timing != MMC_TIMING_UHS_SDR104)
 637		goto disable;
 638
 639	blksz_depth = blksz / (1 << host->data_shift);
 640	fifo_depth = host->fifo_depth;
 641
 642	if (blksz_depth > fifo_depth)
 643		goto disable;
 644
 645	/*
 646	 * If (blksz_depth) >= (fifo_depth >> 1), thld_size should be <= blksz.
 647	 * If (blksz_depth) <  (fifo_depth >> 1), thld_size should be == blksz.
 648	 * Currently just choose blksz.
 649	 */
 650	thld_size = blksz;
 651	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
 652	return;
 653
 654disable:
 655	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
 656}
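/*
 * Worked example (illustrative values): with a 32-bit data bus
 * (data_shift = 2), fifo_depth = 64 and blksz = 512, blksz_depth is
 * 128 > 64, so a whole block can never sit in the FIFO and the
 * threshold is disabled.  With blksz = 128, blksz_depth is 32 <= 64
 * and CDTHRCTL is armed with thld_size = 128, so the controller only
 * starts a block read once 128 bytes of FIFO space are free and the
 * card clock need not be stopped mid-block at HS200/SDR104 rates.
 */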
 657
 658static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
 659{
 660	int sg_len;
 661	u32 temp;
 662
 663	host->using_dma = 0;
 664
 665	/* If we don't have a channel, we can't do DMA */
 666	if (!host->use_dma)
 667		return -ENODEV;
 668
 669	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
 670	if (sg_len < 0) {
 671		host->dma_ops->stop(host);
 672		return sg_len;
 673	}
 674
 675	host->using_dma = 1;
 676
 677	dev_vdbg(host->dev,
 678		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
 679		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
 680		 sg_len);
 681
 682	/*
 683	 * Decide the MSIZE and RX/TX Watermark.
 684	 * If the current block size is the same as the previous one,
 685	 * there is no need to update FIFOTH.
 686	 */
 687	if (host->prev_blksz != data->blksz)
 688		dw_mci_adjust_fifoth(host, data);
 689
 690	/* Enable the DMA interface */
 691	temp = mci_readl(host, CTRL);
 692	temp |= SDMMC_CTRL_DMA_ENABLE;
 693	mci_writel(host, CTRL, temp);
 694
 695	/* Disable RX/TX IRQs, let DMA handle it */
 696	temp = mci_readl(host, INTMASK);
 697	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
 698	mci_writel(host, INTMASK, temp);
 699
 700	host->dma_ops->start(host, sg_len);
 701
 702	return 0;
 703}
 704
 705static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
 706{
 707	u32 temp;
 708
 709	data->error = -EINPROGRESS;
 710
 711	WARN_ON(host->data);
 712	host->sg = NULL;
 713	host->data = data;
 714
 715	if (data->flags & MMC_DATA_READ) {
 716		host->dir_status = DW_MCI_RECV_STATUS;
 717		dw_mci_ctrl_rd_thld(host, data);
 718	} else {
 719		host->dir_status = DW_MCI_SEND_STATUS;
 720	}
 721
 722	if (dw_mci_submit_data_dma(host, data)) {
 723		int flags = SG_MITER_ATOMIC;
 724		if (host->data->flags & MMC_DATA_READ)
 725			flags |= SG_MITER_TO_SG;
 726		else
 727			flags |= SG_MITER_FROM_SG;
 728
 729		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 730		host->sg = data->sg;
 731		host->part_buf_start = 0;
 732		host->part_buf_count = 0;
 733
 734		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
 735		temp = mci_readl(host, INTMASK);
 736		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
 737		mci_writel(host, INTMASK, temp);
 738
 739		temp = mci_readl(host, CTRL);
 740		temp &= ~SDMMC_CTRL_DMA_ENABLE;
 741		mci_writel(host, CTRL, temp);
 742
 743		/*
 744		 * Use the initial fifoth_val for PIO mode.
 745		 * Because the next transfer may be done in DMA mode,
 746		 * prev_blksz should be invalidated.
 747		 */
 748		mci_writel(host, FIFOTH, host->fifoth_val);
 749		host->prev_blksz = 0;
 750	} else {
 751		/*
 752		 * Keep the current block size.
 753		 * It will be used to decide whether to update
 754		 * fifoth register next time.
 755		 */
 756		host->prev_blksz = data->blksz;
 757	}
 758}
 759
 760static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
 761{
 762	struct dw_mci *host = slot->host;
 763	unsigned long timeout = jiffies + msecs_to_jiffies(500);
 764	unsigned int cmd_status = 0;
 765
 766	mci_writel(host, CMDARG, arg);
 767	wmb();
 768	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
 769
 770	while (time_before(jiffies, timeout)) {
 771		cmd_status = mci_readl(host, CMD);
 772		if (!(cmd_status & SDMMC_CMD_START))
 773			return;
 774	}
 775	dev_err(&slot->mmc->class_dev,
 776		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
 777		cmd, arg, cmd_status);
 778}
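/*
 * SDMMC_CMD_UPD_CLK makes the CIU latch the new CLKDIV/CLKENA values
 * without putting anything on the bus; the controller clears
 * SDMMC_CMD_START once the update has been taken, which is what the
 * bounded polling loop above waits for.
 */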
 779
 780static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 781{
 782	struct dw_mci *host = slot->host;
 783	unsigned int clock = slot->clock;
 784	u32 div;
 785	u32 clk_en_a;
 786
 787	if (!clock) {
 788		mci_writel(host, CLKENA, 0);
 789		mci_send_cmd(slot,
 790			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 791	} else if (clock != host->current_speed || force_clkinit) {
 792		div = host->bus_hz / clock;
 793		if (host->bus_hz % clock && host->bus_hz > clock)
 794			/*
 795			 * move the + 1 after the divide to prevent
 796			 * over-clocking the card.
 797			 */
 798			div += 1;
 799
 800		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
 801
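		/*
		 * CLKDIV divides bus_hz by 2 * div (div = 0 bypasses the
		 * divider).  Illustrative example: bus_hz = 50 MHz and a
		 * 400 kHz request give 50000000 / 400000 = 125, rounded
		 * up to an even divider of 2 * 63, i.e. an actual card
		 * clock of 50 MHz / 126 ~= 396.8 kHz, just under the
		 * requested rate.
		 */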
 802		if ((clock << div) != slot->__clk_old || force_clkinit)
 803			dev_info(&slot->mmc->class_dev,
 804				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
 805				 slot->id, host->bus_hz, clock,
 806				 div ? ((host->bus_hz / div) >> 1) :
 807				 host->bus_hz, div);
 808
 809		/* disable clock */
 810		mci_writel(host, CLKENA, 0);
 811		mci_writel(host, CLKSRC, 0);
 812
 813		/* inform CIU */
 814		mci_send_cmd(slot,
 815			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 816
 817		/* set clock to desired speed */
 818		mci_writel(host, CLKDIV, div);
 819
 820		/* inform CIU */
 821		mci_send_cmd(slot,
 822			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 823
 824		/* enable clock; only low power if no SDIO */
 825		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
 826		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
 827			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
 828		mci_writel(host, CLKENA, clk_en_a);
 829
 830		/* inform CIU */
 831		mci_send_cmd(slot,
 832			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 833
 834		/* keep the clock value, reflecting the clock divider */
 835		slot->__clk_old = clock << div;
 836	}
 837
 838	host->current_speed = clock;
 839
 840	/* Set the current slot bus width */
 841	mci_writel(host, CTYPE, (slot->ctype << slot->id));
 842}
 843
 844static void __dw_mci_start_request(struct dw_mci *host,
 845				   struct dw_mci_slot *slot,
 846				   struct mmc_command *cmd)
 847{
 848	struct mmc_request *mrq;
 849	struct mmc_data	*data;
 850	u32 cmdflags;
 851
 852	mrq = slot->mrq;
 853	if (host->pdata->select_slot)
 854		host->pdata->select_slot(slot->id);
 855
 856	host->cur_slot = slot;
 857	host->mrq = mrq;
 858
 859	host->pending_events = 0;
 860	host->completed_events = 0;
 861	host->cmd_status = 0;
 862	host->data_status = 0;
 863	host->dir_status = 0;
 864
 865	data = cmd->data;
 866	if (data) {
 867		dw_mci_set_timeout(host);
 868		mci_writel(host, BYTCNT, data->blksz*data->blocks);
 869		mci_writel(host, BLKSIZ, data->blksz);
 870	}
 871
 872	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
 873
 874	/* this is the first command, send the initialization clock */
 875	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
 876		cmdflags |= SDMMC_CMD_INIT;
 877
 878	if (data) {
 879		dw_mci_submit_data(host, data);
 880		wmb();
 881	}
 882
 883	dw_mci_start_command(host, cmd, cmdflags);
 884
 885	if (mrq->stop)
 886		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
 887	else
 888		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
 889}
 890
 891static void dw_mci_start_request(struct dw_mci *host,
 892				 struct dw_mci_slot *slot)
 893{
 894	struct mmc_request *mrq = slot->mrq;
 895	struct mmc_command *cmd;
 896
 897	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
 898	__dw_mci_start_request(host, slot, cmd);
 899}
 900
 901/* must be called with host->lock held */
 902static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
 903				 struct mmc_request *mrq)
 904{
 905	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
 906		 host->state);
 907
 908	slot->mrq = mrq;
 909
 910	if (host->state == STATE_IDLE) {
 911		host->state = STATE_SENDING_CMD;
 912		dw_mci_start_request(host, slot);
 913	} else {
 914		list_add_tail(&slot->queue_node, &host->queue);
 915	}
 916}
 917
 918static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 919{
 920	struct dw_mci_slot *slot = mmc_priv(mmc);
 921	struct dw_mci *host = slot->host;
 922
 923	WARN_ON(slot->mrq);
 924
 925	/*
 926	 * The check for card presence and queueing of the request must be
 927	 * atomic, otherwise the card could be removed in between and the
 928	 * request wouldn't fail until another card was inserted.
 929	 */
 930	spin_lock_bh(&host->lock);
 931
 932	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
 933		spin_unlock_bh(&host->lock);
 934		mrq->cmd->error = -ENOMEDIUM;
 935		mmc_request_done(mmc, mrq);
 936		return;
 937	}
 938
 939	dw_mci_queue_request(host, slot, mrq);
 940
 941	spin_unlock_bh(&host->lock);
 942}
 943
 944static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 945{
 946	struct dw_mci_slot *slot = mmc_priv(mmc);
 947	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
 948	u32 regs;
 949
 950	switch (ios->bus_width) {
 951	case MMC_BUS_WIDTH_4:
 952		slot->ctype = SDMMC_CTYPE_4BIT;
 953		break;
 954	case MMC_BUS_WIDTH_8:
 955		slot->ctype = SDMMC_CTYPE_8BIT;
 956		break;
 957	default:
 958		/* set default 1 bit mode */
 959		slot->ctype = SDMMC_CTYPE_1BIT;
 960	}
 961
 962	regs = mci_readl(slot->host, UHS_REG);
 963
 964	/* DDR mode set */
 965	if (ios->timing == MMC_TIMING_UHS_DDR50)
 966		regs |= ((0x1 << slot->id) << 16);
 967	else
 968		regs &= ~((0x1 << slot->id) << 16);
 969
 970	mci_writel(slot->host, UHS_REG, regs);
 971	slot->host->timing = ios->timing;
 972
 973	/*
 974	 * Use mirror of ios->clock to prevent race with mmc
 975	 * core ios update when finding the minimum.
 976	 */
 977	slot->clock = ios->clock;
 978
 979	if (drv_data && drv_data->set_ios)
 980		drv_data->set_ios(slot->host, ios);
 981
 982	/* Slot specific timing and width adjustment */
 983	dw_mci_setup_bus(slot, false);
 984
 985	switch (ios->power_mode) {
 986	case MMC_POWER_UP:
 987		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
 988		/* Power up slot */
 989		if (slot->host->pdata->setpower)
 990			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
 991		regs = mci_readl(slot->host, PWREN);
 992		regs |= (1 << slot->id);
 993		mci_writel(slot->host, PWREN, regs);
 994		break;
 995	case MMC_POWER_OFF:
 996		/* Power down slot */
 997		if (slot->host->pdata->setpower)
 998			slot->host->pdata->setpower(slot->id, 0);
 999		regs = mci_readl(slot->host, PWREN);
1000		regs &= ~(1 << slot->id);
1001		mci_writel(slot->host, PWREN, regs);
1002		break;
1003	default:
1004		break;
1005	}
1006}
1007
1008static int dw_mci_get_ro(struct mmc_host *mmc)
1009{
1010	int read_only;
1011	struct dw_mci_slot *slot = mmc_priv(mmc);
1012	struct dw_mci_board *brd = slot->host->pdata;
1013
1014	/* Use platform get_ro function, else try on-board write protect */
1015	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1016		read_only = 0;
1017	else if (brd->get_ro)
1018		read_only = brd->get_ro(slot->id);
1019	else if (gpio_is_valid(slot->wp_gpio))
1020		read_only = gpio_get_value(slot->wp_gpio);
1021	else
1022		read_only =
1023			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1024
1025	dev_dbg(&mmc->class_dev, "card is %s\n",
1026		read_only ? "read-only" : "read-write");
1027
1028	return read_only;
1029}
1030
1031static int dw_mci_get_cd(struct mmc_host *mmc)
1032{
1033	int present;
1034	struct dw_mci_slot *slot = mmc_priv(mmc);
1035	struct dw_mci_board *brd = slot->host->pdata;
1036	struct dw_mci *host = slot->host;
1037	int gpio_cd = mmc_gpio_get_cd(mmc);
1038
1039	/* Use platform get_cd function, else try onboard card detect */
1040	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1041		present = 1;
1042	else if (brd->get_cd)
1043		present = !brd->get_cd(slot->id);
1044	else if (!IS_ERR_VALUE(gpio_cd))
1045		present = gpio_cd;
1046	else
1047		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1048			== 0 ? 1 : 0;
1049
1050	spin_lock_bh(&host->lock);
1051	if (present) {
1052		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1053		dev_dbg(&mmc->class_dev, "card is present\n");
1054	} else {
1055		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1056		dev_dbg(&mmc->class_dev, "card is not present\n");
1057	}
1058	spin_unlock_bh(&host->lock);
1059
1060	return present;
1061}
1062
1063/*
1064 * Disable low power mode.
1065 *
1066 * Low power mode will stop the card clock when idle.  According to the
1067 * description of the CLKENA register we should disable low power mode
1068 * for SDIO cards if we need SDIO interrupts to work.
1069 *
1070 * This function is fast if low power mode is already disabled.
1071 */
1072static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1073{
1074	struct dw_mci *host = slot->host;
1075	u32 clk_en_a;
1076	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1077
1078	clk_en_a = mci_readl(host, CLKENA);
1079
1080	if (clk_en_a & clken_low_pwr) {
1081		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1082		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1083			     SDMMC_CMD_PRV_DAT_WAIT, 0);
1084	}
1085}
1086
1087static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1088{
1089	struct dw_mci_slot *slot = mmc_priv(mmc);
1090	struct dw_mci *host = slot->host;
1091	u32 int_mask;
1092
1093	/* Enable/disable Slot Specific SDIO interrupt */
1094	int_mask = mci_readl(host, INTMASK);
1095	if (enb) {
1096		/*
1097		 * Turn off low power mode if it was enabled.  This is a bit of
1098		 * a heavy operation and we disable / enable IRQs a lot, so
1099		 * we'll leave low power mode disabled and it will get
1100		 * re-enabled again in dw_mci_setup_bus().
1101		 */
1102		dw_mci_disable_low_power(slot);
1103
1104		mci_writel(host, INTMASK,
1105			   (int_mask | SDMMC_INT_SDIO(slot->id)));
1106	} else {
1107		mci_writel(host, INTMASK,
1108			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1109	}
1110}
1111
1112static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1113{
1114	struct dw_mci_slot *slot = mmc_priv(mmc);
1115	struct dw_mci *host = slot->host;
1116	const struct dw_mci_drv_data *drv_data = host->drv_data;
1117	struct dw_mci_tuning_data tuning_data;
1118	int err = -ENOSYS;
1119
1120	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1121		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1122			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1123			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1124		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1125			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1126			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1127		} else {
1128			return -EINVAL;
1129		}
1130	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
1131		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1132		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1133	} else {
1134		dev_err(host->dev,
1135			"Undefined command (%d) for tuning\n", opcode);
1136		return -EINVAL;
1137	}
1138
1139	if (drv_data && drv_data->execute_tuning)
1140		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1141	return err;
1142}
1143
1144static const struct mmc_host_ops dw_mci_ops = {
1145	.request		= dw_mci_request,
1146	.pre_req		= dw_mci_pre_req,
1147	.post_req		= dw_mci_post_req,
1148	.set_ios		= dw_mci_set_ios,
1149	.get_ro			= dw_mci_get_ro,
1150	.get_cd			= dw_mci_get_cd,
1151	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
1152	.execute_tuning		= dw_mci_execute_tuning,
1153};
1154
1155static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1156	__releases(&host->lock)
1157	__acquires(&host->lock)
1158{
1159	struct dw_mci_slot *slot;
1160	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
1161
1162	WARN_ON(host->cmd || host->data);
1163
1164	host->cur_slot->mrq = NULL;
1165	host->mrq = NULL;
1166	if (!list_empty(&host->queue)) {
1167		slot = list_entry(host->queue.next,
1168				  struct dw_mci_slot, queue_node);
1169		list_del(&slot->queue_node);
1170		dev_vdbg(host->dev, "list not empty: %s is next\n",
1171			 mmc_hostname(slot->mmc));
1172		host->state = STATE_SENDING_CMD;
1173		dw_mci_start_request(host, slot);
1174	} else {
1175		dev_vdbg(host->dev, "list empty\n");
1176		host->state = STATE_IDLE;
1177	}
1178
1179	spin_unlock(&host->lock);
1180	mmc_request_done(prev_mmc, mrq);
1181	spin_lock(&host->lock);
1182}
1183
1184static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1185{
1186	u32 status = host->cmd_status;
1187
1188	host->cmd_status = 0;
1189
1190	/* Read the response from the card (up to 16 bytes) */
1191	if (cmd->flags & MMC_RSP_PRESENT) {
1192		if (cmd->flags & MMC_RSP_136) {
1193			cmd->resp[3] = mci_readl(host, RESP0);
1194			cmd->resp[2] = mci_readl(host, RESP1);
1195			cmd->resp[1] = mci_readl(host, RESP2);
1196			cmd->resp[0] = mci_readl(host, RESP3);
1197		} else {
1198			cmd->resp[0] = mci_readl(host, RESP0);
1199			cmd->resp[1] = 0;
1200			cmd->resp[2] = 0;
1201			cmd->resp[3] = 0;
1202		}
1203	}
1204
1205	if (status & SDMMC_INT_RTO)
1206		cmd->error = -ETIMEDOUT;
1207	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1208		cmd->error = -EILSEQ;
1209	else if (status & SDMMC_INT_RESP_ERR)
1210		cmd->error = -EIO;
1211	else
1212		cmd->error = 0;
1213
1214	if (cmd->error) {
1215		/* newer ip versions need a delay between retries */
1216		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1217			mdelay(20);
1218	}
1219
1220	return cmd->error;
1221}
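/*
 * Note the reversed copy above for 136-bit (R2) responses: the
 * controller stores bits [31:0] in RESP0 up to bits [127:96] in RESP3,
 * while the mmc core expects resp[0] to hold the most significant
 * word, hence resp[3] = RESP0 ... resp[0] = RESP3.
 */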
1222
1223static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1224{
1225	u32 status = host->data_status;
1226
1227	if (status & DW_MCI_DATA_ERROR_FLAGS) {
1228		if (status & SDMMC_INT_DRTO) {
1229			data->error = -ETIMEDOUT;
1230		} else if (status & SDMMC_INT_DCRC) {
1231			data->error = -EILSEQ;
1232		} else if (status & SDMMC_INT_EBE) {
1233			if (host->dir_status ==
1234				DW_MCI_SEND_STATUS) {
1235				/*
1236				 * No data CRC status was returned.
1237				 * The number of bytes transferred
1238				 * will be exaggerated in PIO mode.
1239				 */
1240				data->bytes_xfered = 0;
1241				data->error = -ETIMEDOUT;
1242			} else if (host->dir_status ==
1243					DW_MCI_RECV_STATUS) {
1244				data->error = -EIO;
1245			}
1246		} else {
1247			/* SDMMC_INT_SBE is included */
1248			data->error = -EIO;
1249		}
1250
1251		dev_err(host->dev, "data error, status 0x%08x\n", status);
1252
1253		/*
1254		 * After an error, there may be data lingering
1255		 * in the FIFO
1256		 */
1257		dw_mci_fifo_reset(host);
1258	} else {
1259		data->bytes_xfered = data->blocks * data->blksz;
1260		data->error = 0;
1261	}
1262
1263	return data->error;
1264}
1265
1266static void dw_mci_tasklet_func(unsigned long priv)
1267{
1268	struct dw_mci *host = (struct dw_mci *)priv;
1269	struct mmc_data	*data;
1270	struct mmc_command *cmd;
1271	struct mmc_request *mrq;
1272	enum dw_mci_state state;
1273	enum dw_mci_state prev_state;
1274	unsigned int err;
1275
1276	spin_lock(&host->lock);
1277
1278	state = host->state;
1279	data = host->data;
1280	mrq = host->mrq;
1281
1282	do {
1283		prev_state = state;
1284
1285		switch (state) {
1286		case STATE_IDLE:
1287			break;
1288
1289		case STATE_SENDING_CMD:
1290			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1291						&host->pending_events))
1292				break;
1293
1294			cmd = host->cmd;
1295			host->cmd = NULL;
1296			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1297			err = dw_mci_command_complete(host, cmd);
1298			if (cmd == mrq->sbc && !err) {
1299				prev_state = state = STATE_SENDING_CMD;
1300				__dw_mci_start_request(host, host->cur_slot,
1301						       mrq->cmd);
1302				goto unlock;
1303			}
1304
1305			if (cmd->data && err) {
1306				dw_mci_stop_dma(host);
1307				send_stop_abort(host, data);
1308				state = STATE_SENDING_STOP;
1309				break;
1310			}
1311
1312			if (!cmd->data || err) {
1313				dw_mci_request_end(host, mrq);
1314				goto unlock;
1315			}
1316
1317			prev_state = state = STATE_SENDING_DATA;
1318			/* fall through */
1319
1320		case STATE_SENDING_DATA:
1321			if (test_and_clear_bit(EVENT_DATA_ERROR,
1322					       &host->pending_events)) {
1323				dw_mci_stop_dma(host);
1324				send_stop_abort(host, data);
1325				state = STATE_DATA_ERROR;
1326				break;
1327			}
1328
1329			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1330						&host->pending_events))
1331				break;
1332
1333			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1334			prev_state = state = STATE_DATA_BUSY;
1335			/* fall through */
1336
1337		case STATE_DATA_BUSY:
1338			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1339						&host->pending_events))
1340				break;
1341
1342			host->data = NULL;
1343			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1344			err = dw_mci_data_complete(host, data);
1345
1346			if (!err) {
1347				if (!data->stop || mrq->sbc) {
1348					if (mrq->sbc && data->stop)
1349						data->stop->error = 0;
1350					dw_mci_request_end(host, mrq);
1351					goto unlock;
1352				}
1353
1354				/* stop command for open-ended transfer */
1355				if (data->stop)
1356					send_stop_abort(host, data);
1357			}
1358
1359			/*
1360			 * If err is non-zero, a stop/abort command
1361			 * has already been issued.
1362			 */
1363			prev_state = state = STATE_SENDING_STOP;
1364
1365			/* fall through */
1366
1367		case STATE_SENDING_STOP:
1368			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1369						&host->pending_events))
1370				break;
1371
1372			/* CMD error in data command */
1373			if (mrq->cmd->error && mrq->data)
1374				dw_mci_fifo_reset(host);
1375
1376			host->cmd = NULL;
1377			host->data = NULL;
1378
1379			if (mrq->stop)
1380				dw_mci_command_complete(host, mrq->stop);
1381			else
1382				host->cmd_status = 0;
1383
1384			dw_mci_request_end(host, mrq);
1385			goto unlock;
1386
1387		case STATE_DATA_ERROR:
1388			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1389						&host->pending_events))
1390				break;
1391
1392			state = STATE_DATA_BUSY;
1393			break;
1394		}
1395	} while (state != prev_state);
1396
1397	host->state = state;
1398unlock:
1399	spin_unlock(&host->lock);
1400
1401}
1402
1403/* push final bytes to part_buf, only use during push */
1404static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1405{
1406	memcpy((void *)&host->part_buf, buf, cnt);
1407	host->part_buf_count = cnt;
1408}
1409
1410/* append bytes to part_buf, only use during push */
1411static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1412{
1413	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1414	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1415	host->part_buf_count += cnt;
1416	return cnt;
1417}
1418
1419/* pull first bytes from part_buf, only use during pull */
1420static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1421{
1422	cnt = min(cnt, (int)host->part_buf_count);
1423	if (cnt) {
1424		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1425		       cnt);
1426		host->part_buf_count -= cnt;
1427		host->part_buf_start += cnt;
1428	}
1429	return cnt;
1430}
1431
1432/* pull final bytes from the part_buf, assuming it's just been filled */
1433static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1434{
1435	memcpy(buf, &host->part_buf, cnt);
1436	host->part_buf_start = cnt;
1437	host->part_buf_count = (1 << host->data_shift) - cnt;
1438}
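/*
 * The part_buf helpers above handle transfers that are not a multiple
 * of the FIFO width.  Illustrative example: pushing 7 bytes through a
 * 32-bit FIFO writes one full word and parks the remaining 3 bytes in
 * part_buf; they are flushed either when later data completes the word
 * or when the expected block length is reached (see the push/pull
 * functions below).
 */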
1439
1440static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1441{
1442	struct mmc_data *data = host->data;
1443	int init_cnt = cnt;
1444
1445	/* try and push anything in the part_buf */
1446	if (unlikely(host->part_buf_count)) {
1447		int len = dw_mci_push_part_bytes(host, buf, cnt);
1448		buf += len;
1449		cnt -= len;
1450		if (host->part_buf_count == 2) {
1451			mci_writew(host, DATA(host->data_offset),
1452					host->part_buf16);
1453			host->part_buf_count = 0;
1454		}
1455	}
1456#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1457	if (unlikely((unsigned long)buf & 0x1)) {
1458		while (cnt >= 2) {
1459			u16 aligned_buf[64];
1460			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1461			int items = len >> 1;
1462			int i;
1463			/* memcpy from input buffer into aligned buffer */
1464			memcpy(aligned_buf, buf, len);
1465			buf += len;
1466			cnt -= len;
1467			/* push data from aligned buffer into fifo */
1468			for (i = 0; i < items; ++i)
1469				mci_writew(host, DATA(host->data_offset),
1470						aligned_buf[i]);
1471		}
1472	} else
1473#endif
1474	{
1475		u16 *pdata = buf;
1476		for (; cnt >= 2; cnt -= 2)
1477			mci_writew(host, DATA(host->data_offset), *pdata++);
1478		buf = pdata;
1479	}
1480	/* put anything remaining in the part_buf */
1481	if (cnt) {
1482		dw_mci_set_part_bytes(host, buf, cnt);
1483		 /* Push data if we have reached the expected data length */
1484		if ((data->bytes_xfered + init_cnt) ==
1485		    (data->blksz * data->blocks))
1486			mci_writew(host, DATA(host->data_offset),
1487				   host->part_buf16);
1488	}
1489}
1490
1491static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1492{
1493#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1494	if (unlikely((unsigned long)buf & 0x1)) {
1495		while (cnt >= 2) {
1496			/* pull data from fifo into aligned buffer */
1497			u16 aligned_buf[64];
1498			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1499			int items = len >> 1;
1500			int i;
1501			for (i = 0; i < items; ++i)
1502				aligned_buf[i] = mci_readw(host,
1503						DATA(host->data_offset));
1504			/* memcpy from aligned buffer into output buffer */
1505			memcpy(buf, aligned_buf, len);
1506			buf += len;
1507			cnt -= len;
1508		}
1509	} else
1510#endif
1511	{
1512		u16 *pdata = buf;
1513		for (; cnt >= 2; cnt -= 2)
1514			*pdata++ = mci_readw(host, DATA(host->data_offset));
1515		buf = pdata;
1516	}
1517	if (cnt) {
1518		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1519		dw_mci_pull_final_bytes(host, buf, cnt);
1520	}
1521}
1522
1523static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1524{
1525	struct mmc_data *data = host->data;
1526	int init_cnt = cnt;
1527
1528	/* try and push anything in the part_buf */
1529	if (unlikely(host->part_buf_count)) {
1530		int len = dw_mci_push_part_bytes(host, buf, cnt);
1531		buf += len;
1532		cnt -= len;
1533		if (host->part_buf_count == 4) {
1534			mci_writel(host, DATA(host->data_offset),
1535					host->part_buf32);
1536			host->part_buf_count = 0;
1537		}
1538	}
1539#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1540	if (unlikely((unsigned long)buf & 0x3)) {
1541		while (cnt >= 4) {
1542			u32 aligned_buf[32];
1543			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1544			int items = len >> 2;
1545			int i;
1546			/* memcpy from input buffer into aligned buffer */
1547			memcpy(aligned_buf, buf, len);
1548			buf += len;
1549			cnt -= len;
1550			/* push data from aligned buffer into fifo */
1551			for (i = 0; i < items; ++i)
1552				mci_writel(host, DATA(host->data_offset),
1553						aligned_buf[i]);
1554		}
1555	} else
1556#endif
1557	{
1558		u32 *pdata = buf;
1559		for (; cnt >= 4; cnt -= 4)
1560			mci_writel(host, DATA(host->data_offset), *pdata++);
1561		buf = pdata;
1562	}
1563	/* put anything remaining in the part_buf */
1564	if (cnt) {
1565		dw_mci_set_part_bytes(host, buf, cnt);
1566		 /* Push data if we have reached the expected data length */
1567		if ((data->bytes_xfered + init_cnt) ==
1568		    (data->blksz * data->blocks))
1569			mci_writel(host, DATA(host->data_offset),
1570				   host->part_buf32);
1571	}
1572}
1573
1574static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1575{
1576#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1577	if (unlikely((unsigned long)buf & 0x3)) {
1578		while (cnt >= 4) {
1579			/* pull data from fifo into aligned buffer */
1580			u32 aligned_buf[32];
1581			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1582			int items = len >> 2;
1583			int i;
1584			for (i = 0; i < items; ++i)
1585				aligned_buf[i] = mci_readl(host,
1586						DATA(host->data_offset));
1587			/* memcpy from aligned buffer into output buffer */
1588			memcpy(buf, aligned_buf, len);
1589			buf += len;
1590			cnt -= len;
1591		}
1592	} else
1593#endif
1594	{
1595		u32 *pdata = buf;
 
1596		for (; cnt >= 4; cnt -= 4)
1597			*pdata++ = mci_readl(host, DATA(host->data_offset));
1598		buf = pdata;
1599	}
1600	if (cnt) {
1601		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1602		dw_mci_pull_final_bytes(host, buf, cnt);
1603	}
1604}
1605
1606static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1607{
1608	struct mmc_data *data = host->data;
1609	int init_cnt = cnt;
1610
1611	/* try and push anything in the part_buf */
1612	if (unlikely(host->part_buf_count)) {
1613		int len = dw_mci_push_part_bytes(host, buf, cnt);
1614		buf += len;
1615		cnt -= len;
1616
1617		if (host->part_buf_count == 8) {
1618			mci_writeq(host, DATA(host->data_offset),
1619					host->part_buf);
1620			host->part_buf_count = 0;
1621		}
1622	}
1623#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1624	if (unlikely((unsigned long)buf & 0x7)) {
1625		while (cnt >= 8) {
1626			u64 aligned_buf[16];
1627			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1628			int items = len >> 3;
1629			int i;
1630			/* memcpy from input buffer into aligned buffer */
1631			memcpy(aligned_buf, buf, len);
1632			buf += len;
1633			cnt -= len;
1634			/* push data from aligned buffer into fifo */
1635			for (i = 0; i < items; ++i)
1636				mci_writeq(host, DATA(host->data_offset),
1637						aligned_buf[i]);
1638		}
1639	} else
1640#endif
1641	{
1642		u64 *pdata = buf;
1643		for (; cnt >= 8; cnt -= 8)
1644			mci_writeq(host, DATA(host->data_offset), *pdata++);
1645		buf = pdata;
1646	}
1647	/* put anything remaining in the part_buf */
1648	if (cnt) {
1649		dw_mci_set_part_bytes(host, buf, cnt);
1650		/* Push data if we have reached the expected data length */
1651		if ((data->bytes_xfered + init_cnt) ==
1652		    (data->blksz * data->blocks))
1653			mci_writeq(host, DATA(host->data_offset),
1654				   host->part_buf);
1655	}
1656}
1657
1658static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1659{
1660#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1661	if (unlikely((unsigned long)buf & 0x7)) {
1662		while (cnt >= 8) {
1663			/* pull data from fifo into aligned buffer */
1664			u64 aligned_buf[16];
1665			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1666			int items = len >> 3;
1667			int i;
1668			for (i = 0; i < items; ++i)
1669				aligned_buf[i] = mci_readq(host,
1670						DATA(host->data_offset));
1671			/* memcpy from aligned buffer into output buffer */
1672			memcpy(buf, aligned_buf, len);
1673			buf += len;
1674			cnt -= len;
1675		}
1676	} else
1677#endif
1678	{
 
1680		for (; cnt >= 8; cnt -= 8)
1681			*pdata++ = mci_readq(host, DATA(host->data_offset));
1682		buf = pdata;
1683	}
1684	if (cnt) {
1685		host->part_buf = mci_readq(host, DATA(host->data_offset));
1686		dw_mci_pull_final_bytes(host, buf, cnt);
1687	}
1688}
1689
1690static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1691{
1692	int len;
1693
1694	/* get remaining partial bytes */
1695	len = dw_mci_pull_part_bytes(host, buf, cnt);
1696	if (unlikely(len == cnt))
1697		return;
1698	buf += len;
1699	cnt -= len;
1700
1701	/* get the rest of the data */
1702	host->pull_data(host, buf, cnt);
1703}
1704
1705static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1706{
1707	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1708	void *buf;
1709	unsigned int offset;
1710	struct mmc_data	*data = host->data;
1711	int shift = host->data_shift;
1712	u32 status;
1713	unsigned int len;
1714	unsigned int remain, fcnt;
1715
1716	do {
1717		if (!sg_miter_next(sg_miter))
1718			goto done;
1719
1720		host->sg = sg_miter->piter.sg;
1721		buf = sg_miter->addr;
1722		remain = sg_miter->length;
1723		offset = 0;
1724
1725		do {
1726			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1727					<< shift) + host->part_buf_count;
1728			len = min(remain, fcnt);
1729			if (!len)
1730				break;
1731			dw_mci_pull_data(host, (void *)(buf + offset), len);
1732			data->bytes_xfered += len;
1733			offset += len;
1734			remain -= len;
1735		} while (remain);
1736
1737		sg_miter->consumed = offset;
1738		status = mci_readl(host, MINTSTS);
1739		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1740	/* if RXDR is ready, read again */
1741	} while ((status & SDMMC_INT_RXDR) ||
1742		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1743
1744	if (!remain) {
1745		if (!sg_miter_next(sg_miter))
1746			goto done;
1747		sg_miter->consumed = 0;
1748	}
1749	sg_miter_stop(sg_miter);
1750	return;
1751
1752done:
1753	sg_miter_stop(sg_miter);
1754	host->sg = NULL;
1755	smp_wmb();
1756	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1757}
1758
1759static void dw_mci_write_data_pio(struct dw_mci *host)
1760{
1761	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1762	void *buf;
1763	unsigned int offset;
1764	struct mmc_data	*data = host->data;
1765	int shift = host->data_shift;
1766	u32 status;
1767	unsigned int len;
1768	unsigned int fifo_depth = host->fifo_depth;
1769	unsigned int remain, fcnt;
1770
1771	do {
1772		if (!sg_miter_next(sg_miter))
1773			goto done;
1774
1775		host->sg = sg_miter->piter.sg;
1776		buf = sg_miter->addr;
1777		remain = sg_miter->length;
1778		offset = 0;
1779
1780		do {
1781			fcnt = ((fifo_depth -
1782				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1783					<< shift) - host->part_buf_count;
1784			len = min(remain, fcnt);
1785			if (!len)
1786				break;
1787			host->push_data(host, (void *)(buf + offset), len);
1788			data->bytes_xfered += len;
1789			offset += len;
1790			remain -= len;
1791		} while (remain);
1792
1793		sg_miter->consumed = offset;
1794		status = mci_readl(host, MINTSTS);
1795		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1796	} while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
1797
1798	if (!remain) {
1799		if (!sg_miter_next(sg_miter))
1800			goto done;
1801		sg_miter->consumed = 0;
1802	}
1803	sg_miter_stop(sg_miter);
1804	return;
1805
1806done:
1807	sg_miter_stop(sg_miter);
1808	host->sg = NULL;
1809	smp_wmb();
1810	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1811}
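/*
 * PIO watermark math (illustrative): with a 32-bit FIFO (shift = 2)
 * and fifo_depth = 32, a TXDR interrupt arriving with 20 words still
 * queued lets dw_mci_write_data_pio() push (32 - 20) << 2 = 48 bytes
 * (less any bytes parked in part_buf) before waiting for the next
 * watermark interrupt.
 */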
1812
1813static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1814{
1815	if (!host->cmd_status)
1816		host->cmd_status = status;
1817
1818	smp_wmb();
1819
1820	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1821	tasklet_schedule(&host->tasklet);
1822}
1823
1824static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1825{
1826	struct dw_mci *host = dev_id;
1827	u32 pending;
1828	int i;
1829
1830	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1831
1832	/*
1833	 * DTO fix - version 2.10a and below, and only if internal DMA
1834	 * is configured.
1835	 */
1836	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1837		if (!pending &&
1838		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1839			pending |= SDMMC_INT_DATA_OVER;
1840	}
1841
1842	if (pending) {
1843		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1844			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1845			host->cmd_status = pending;
1846			smp_wmb();
1847			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1848		}
1849
1850		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1851			/* if there is an error report DATA_ERROR */
1852			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1853			host->data_status = pending;
1854			smp_wmb();
1855			set_bit(EVENT_DATA_ERROR, &host->pending_events);
1856			tasklet_schedule(&host->tasklet);
1857		}
1858
1859		if (pending & SDMMC_INT_DATA_OVER) {
1860			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1861			if (!host->data_status)
1862				host->data_status = pending;
1863			smp_wmb();
1864			if (host->dir_status == DW_MCI_RECV_STATUS) {
1865				if (host->sg != NULL)
1866					dw_mci_read_data_pio(host, true);
1867			}
1868			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1869			tasklet_schedule(&host->tasklet);
1870		}
1871
1872		if (pending & SDMMC_INT_RXDR) {
1873			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1874			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1875				dw_mci_read_data_pio(host, false);
1876		}
1877
1878		if (pending & SDMMC_INT_TXDR) {
1879			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1880			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1881				dw_mci_write_data_pio(host);
1882		}
1883
1884		if (pending & SDMMC_INT_CMD_DONE) {
1885			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1886			dw_mci_cmd_interrupt(host, pending);
1887		}
1888
1889		if (pending & SDMMC_INT_CD) {
1890			mci_writel(host, RINTSTS, SDMMC_INT_CD);
1891			queue_work(host->card_workqueue, &host->card_work);
1892		}
1893
1894		/* Handle SDIO Interrupts */
1895		for (i = 0; i < host->num_slots; i++) {
1896			struct dw_mci_slot *slot = host->slot[i];
1897			if (pending & SDMMC_INT_SDIO(i)) {
1898				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1899				mmc_signal_sdio_irq(slot->mmc);
1900			}
1901		}
1902
1903	}
1904
1905#ifdef CONFIG_MMC_DW_IDMAC
1906	/* Handle DMA interrupts */
1907	pending = mci_readl(host, IDSTS);
1908	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1909		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1910		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1911		host->dma_ops->complete(host);
1912	}
1913#endif
1914
1915	return IRQ_HANDLED;
1916}
1917
1918static void dw_mci_work_routine_card(struct work_struct *work)
1919{
1920	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1921	int i;
1922
1923	for (i = 0; i < host->num_slots; i++) {
1924		struct dw_mci_slot *slot = host->slot[i];
1925		struct mmc_host *mmc = slot->mmc;
1926		struct mmc_request *mrq;
1927		int present;
1928
1929		present = dw_mci_get_cd(mmc);
1930		while (present != slot->last_detect_state) {
1931			dev_dbg(&slot->mmc->class_dev, "card %s\n",
1932				present ? "inserted" : "removed");
1933
1934			spin_lock_bh(&host->lock);
1935
1936			/* Card change detected */
1937			slot->last_detect_state = present;
1938
1939			/* Clean up queue if present */
1940			mrq = slot->mrq;
1941			if (mrq) {
1942				if (mrq == host->mrq) {
1943					host->data = NULL;
1944					host->cmd = NULL;
1945
1946					switch (host->state) {
1947					case STATE_IDLE:
1948						break;
1949					case STATE_SENDING_CMD:
1950						mrq->cmd->error = -ENOMEDIUM;
1951						if (!mrq->data)
1952							break;
1953						/* fall through */
1954					case STATE_SENDING_DATA:
1955						mrq->data->error = -ENOMEDIUM;
1956						dw_mci_stop_dma(host);
1957						break;
1958					case STATE_DATA_BUSY:
1959					case STATE_DATA_ERROR:
1960						if (mrq->data->error == -EINPROGRESS)
1961							mrq->data->error = -ENOMEDIUM;
1962						/* fall through */
1963					case STATE_SENDING_STOP:
1964						if (mrq->stop)
1965							mrq->stop->error = -ENOMEDIUM;
1966						break;
1967					}
1968
1969					dw_mci_request_end(host, mrq);
1970				} else {
1971					list_del(&slot->queue_node);
1972					mrq->cmd->error = -ENOMEDIUM;
1973					if (mrq->data)
1974						mrq->data->error = -ENOMEDIUM;
1975					if (mrq->stop)
1976						mrq->stop->error = -ENOMEDIUM;
1977
1978					spin_unlock(&host->lock);
1979					mmc_request_done(slot->mmc, mrq);
1980					spin_lock(&host->lock);
1981				}
1982			}
1983
1984			/* Power down slot */
1985			if (present == 0) {
1986				/* Clear down the FIFO */
1987				dw_mci_fifo_reset(host);
1988#ifdef CONFIG_MMC_DW_IDMAC
1989				dw_mci_idmac_reset(host);
1990#endif
1991
1992			}
1993
1994			spin_unlock_bh(&host->lock);
1995
1996			present = dw_mci_get_cd(mmc);
1997		}
1998
1999		mmc_detect_change(slot->mmc,
2000			msecs_to_jiffies(host->pdata->detect_delay_ms));
2001	}
2002}
2003
2004#ifdef CONFIG_OF
2005/* given a slot id, find out the device node representing that slot */
2006static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2007{
2008	struct device_node *np;
2009	const __be32 *addr;
2010	int len;
2011
2012	if (!dev || !dev->of_node)
2013		return NULL;
2014
2015	for_each_child_of_node(dev->of_node, np) {
2016		addr = of_get_property(np, "reg", &len);
2017		if (!addr || (len < sizeof(int)))
2018			continue;
2019		if (be32_to_cpup(addr) == slot)
2020			return np;
2021	}
2022	return NULL;
2023}
2024
2025static struct dw_mci_of_slot_quirks {
2026	char *quirk;
2027	int id;
2028} of_slot_quirks[] = {
2029	{
2030		.quirk	= "disable-wp",
2031		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2032	},
2033};
2034
2035static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2036{
2037	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2038	int quirks = 0;
2039	int idx;
2040
2041	/* get quirks */
2042	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2043		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2044			quirks |= of_slot_quirks[idx].id;
2045
2046	return quirks;
2047}
2048
2049/* find out bus-width for a given slot */
2050static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2051{
2052	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2053	u32 bus_wd = 1;
2054
2055	if (!np)
2056		return 1;
2057
2058	if (of_property_read_u32(np, "bus-width", &bus_wd))
2059		dev_err(dev, "bus-width property not found, assuming a width of 1\n");
2061	return bus_wd;
2062}
2063
2064/* find the write protect gpio for a given slot, or -EINVAL if none specified */
2065static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2066{
2067	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2068	int gpio;
2069
2070	if (!np)
2071		return -EINVAL;
2072
2073	gpio = of_get_named_gpio(np, "wp-gpios", 0);
2074
2075	/* Having a missing entry is valid; return silently */
2076	if (!gpio_is_valid(gpio))
2077		return -EINVAL;
2078
2079	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2080		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2081		return -EINVAL;
2082	}
2083
2084	return gpio;
2085}
2086
2087/* find the cd gpio for a given slot */
2088static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2089					struct mmc_host *mmc)
2090{
2091	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2092	int gpio;
2093
2094	if (!np)
2095		return;
2096
2097	gpio = of_get_named_gpio(np, "cd-gpios", 0);
2098
2099	/* Having a missing entry is valid; return silently */
2100	if (!gpio_is_valid(gpio))
2101		return;
2102
2103	if (mmc_gpio_request_cd(mmc, gpio, 0))
2104		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2105}
2106#else /* CONFIG_OF */
2107static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2108{
2109	return 0;
2110}
2111static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2112{
2113	return 1;
2114}
2115static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2116{
2117	return NULL;
2118}
2119static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2120{
2121	return -EINVAL;
2122}
2123static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2124					struct mmc_host *mmc)
2125{
2127}
2128#endif /* CONFIG_OF */
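/*
 * Example (sketch) of a slot sub-node consuming the properties parsed by
 * the CONFIG_OF helpers above; the unit address and GPIO phandles are
 * illustrative only:
 *
 *	slot@0 {
 *		reg = <0>;
 *		bus-width = <4>;
 *		wp-gpios = <&gpio2 0 0>;
 *		cd-gpios = <&gpio2 1 0>;
 *		disable-wp;
 *	};
 */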
2129
2130static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2131{
2132	struct mmc_host *mmc;
2133	struct dw_mci_slot *slot;
2134	const struct dw_mci_drv_data *drv_data = host->drv_data;
2135	int ctrl_id, ret;
2136	u32 freq[2];
2137	u8 bus_width;
2138
2139	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2140	if (!mmc)
2141		return -ENOMEM;
2142
2143	slot = mmc_priv(mmc);
2144	slot->id = id;
2145	slot->mmc = mmc;
2146	slot->host = host;
2147	host->slot[id] = slot;
2148
2149	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2150
2151	mmc->ops = &dw_mci_ops;
2152	if (of_property_read_u32_array(host->dev->of_node,
2153				       "clock-freq-min-max", freq, 2)) {
2154		mmc->f_min = DW_MCI_FREQ_MIN;
2155		mmc->f_max = DW_MCI_FREQ_MAX;
2156	} else {
2157		mmc->f_min = freq[0];
2158		mmc->f_max = freq[1];
2159	}
2160
2161	if (host->pdata->get_ocr)
2162		mmc->ocr_avail = host->pdata->get_ocr(id);
2163	else
2164		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2165
2166	/*
2167	 * Start with slot power disabled, it will be enabled when a card
2168	 * is detected.
2169	 */
2170	if (host->pdata->setpower)
2171		host->pdata->setpower(id, 0);
2172
2173	if (host->pdata->caps)
2174		mmc->caps = host->pdata->caps;
2175
2176	if (host->pdata->pm_caps)
2177		mmc->pm_caps = host->pdata->pm_caps;
2178
2179	if (host->dev->of_node) {
2180		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2181		if (ctrl_id < 0)
2182			ctrl_id = 0;
2183	} else {
2184		ctrl_id = to_platform_device(host->dev)->id;
2185	}
2186	if (drv_data && drv_data->caps)
2187		mmc->caps |= drv_data->caps[ctrl_id];
2188
2189	if (host->pdata->caps2)
2190		mmc->caps2 = host->pdata->caps2;
2191
2192	if (host->pdata->get_bus_wd)
2193		bus_width = host->pdata->get_bus_wd(slot->id);
2194	else if (host->dev->of_node)
2195		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2196	else
2197		bus_width = 1;
2198
2199	switch (bus_width) {
2200	case 8:
2201		mmc->caps |= MMC_CAP_8_BIT_DATA;
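		/* fall through: an 8-bit slot also supports 4-bit transfers */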
2202	case 4:
2203		mmc->caps |= MMC_CAP_4_BIT_DATA;
2204	}
2205
2206	if (host->pdata->blk_settings) {
2207		mmc->max_segs = host->pdata->blk_settings->max_segs;
2208		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2209		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2210		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2211		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2212	} else {
2213		/* Useful defaults if platform data is unset. */
2214#ifdef CONFIG_MMC_DW_IDMAC
2215		mmc->max_segs = host->ring_size;
2216		mmc->max_blk_size = 65536;
2217		mmc->max_blk_count = host->ring_size;
2218		mmc->max_seg_size = 0x1000;
2219		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2220#else
2221		mmc->max_segs = 64;
2222		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2223		mmc->max_blk_count = 512;
2224		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2225		mmc->max_seg_size = mmc->max_req_size;
2226#endif /* CONFIG_MMC_DW_IDMAC */
2227	}
2228
2229	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2230	dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
2231
2232	ret = mmc_add_host(mmc);
2233	if (ret)
2234		goto err_setup_bus;
2235
2236#if defined(CONFIG_DEBUG_FS)
2237	dw_mci_init_debugfs(slot);
2238#endif
2239
2240	/* Card initially undetected */
2241	slot->last_detect_state = 0;
2242
2243	return 0;
2244
2245err_setup_bus:
2246	mmc_free_host(mmc);
2247	return ret;
2248}
2249
2250static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2251{
2252	/* Shutdown detect IRQ */
2253	if (slot->host->pdata->exit)
2254		slot->host->pdata->exit(id);
2255
2256	/* Debugfs stuff is cleaned up by mmc core */
2257	mmc_remove_host(slot->mmc);
2258	slot->host->slot[id] = NULL;
2259	mmc_free_host(slot->mmc);
2260}
2261
2262static void dw_mci_init_dma(struct dw_mci *host)
2263{
2264	/* Alloc memory for sg translation */
2265	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2266					  &host->sg_dma, GFP_KERNEL);
2267	if (!host->sg_cpu) {
2268		dev_err(host->dev, "%s: could not alloc DMA memory\n",
2269			__func__);
2270		goto no_dma;
2271	}
2272
2273	/* Determine which DMA interface to use */
2274#ifdef CONFIG_MMC_DW_IDMAC
2275	host->dma_ops = &dw_mci_idmac_ops;
2276	dev_info(host->dev, "Using internal DMA controller.\n");
2277#endif
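	/*
	 * Otherwise keep whatever dma_ops the platform data handed in
	 * (assigned in dw_mci_probe() before this function runs).
	 */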
2278
2279	if (!host->dma_ops)
2280		goto no_dma;
2281
2282	if (host->dma_ops->init && host->dma_ops->start &&
2283	    host->dma_ops->stop && host->dma_ops->cleanup) {
2284		if (host->dma_ops->init(host)) {
2285			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
2286				__func__);
2287			goto no_dma;
2288		}
2289	} else {
2290		dev_err(host->dev, "DMA initialization not found.\n");
2291		goto no_dma;
2292	}
2293
2294	host->use_dma = 1;
2295	return;
2296
2297no_dma:
2298	dev_info(host->dev, "Using PIO mode.\n");
2299	host->use_dma = 0;
2301}
2302
2303static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2304{
2305	unsigned long timeout = jiffies + msecs_to_jiffies(500);
2306	u32 ctrl;
2307
2308	ctrl = mci_readl(host, CTRL);
2309	ctrl |= reset;
2310	mci_writel(host, CTRL, ctrl);
2311
2312	/* wait till resets clear */
2313	do {
2314		ctrl = mci_readl(host, CTRL);
2315		if (!(ctrl & reset))
2316			return true;
2317	} while (time_before(jiffies, timeout));
2318
2319	dev_err(host->dev,
2320		"Timeout resetting block (ctrl reset %#x)\n",
2321		ctrl & reset);
2322
2323	return false;
2324}
2325
2326static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2327{
2328	/*
2329	 * Resetting generates a block interrupt, hence the
2330	 * scatter-gather pointer is set to NULL first.
2331	 */
2332	if (host->sg) {
2333		sg_miter_stop(&host->sg_miter);
2334		host->sg = NULL;
2335	}
2336
2337	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2338}
2339
2340static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2341{
2342	return dw_mci_ctrl_reset(host,
2343				 SDMMC_CTRL_FIFO_RESET |
2344				 SDMMC_CTRL_RESET |
2345				 SDMMC_CTRL_DMA_RESET);
2346}
2347
2348#ifdef CONFIG_OF
2349static struct dw_mci_of_quirks {
2350	char *quirk;
2351	int id;
2352} of_quirks[] = {
2353	{
2354		.quirk	= "broken-cd",
2355		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2356	},
2357};
2358
2359static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2360{
2361	struct dw_mci_board *pdata;
2362	struct device *dev = host->dev;
2363	struct device_node *np = dev->of_node;
2364	const struct dw_mci_drv_data *drv_data = host->drv_data;
2365	int idx, ret;
2366	u32 clock_frequency;
2367
2368	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2369	if (!pdata) {
2370		dev_err(dev, "could not allocate memory for pdata\n");
2371		return ERR_PTR(-ENOMEM);
2372	}
2373
2374	/* find out number of slots supported */
2375	if (of_property_read_u32(dev->of_node, "num-slots",
2376				&pdata->num_slots)) {
2377		dev_info(dev, "num-slots property not found, assuming 1 slot is available\n");
2379		pdata->num_slots = 1;
2380	}
2381
2382	/* get quirks */
2383	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2384		if (of_get_property(np, of_quirks[idx].quirk, NULL))
2385			pdata->quirks |= of_quirks[idx].id;
2386
2387	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2388		dev_info(dev, "fifo-depth property not found, using the FIFOTH register value as default\n");
2390
2391	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2392
2393	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2394		pdata->bus_hz = clock_frequency;
2395
2396	if (drv_data && drv_data->parse_dt) {
2397		ret = drv_data->parse_dt(host);
2398		if (ret)
2399			return ERR_PTR(ret);
2400	}
2401
2402	if (of_find_property(np, "keep-power-in-suspend", NULL))
2403		pdata->pm_caps |= MMC_PM_KEEP_POWER;
2404
2405	if (of_find_property(np, "enable-sdio-wakeup", NULL))
2406		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2407
2408	if (of_find_property(np, "supports-highspeed", NULL))
2409		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2410
2411	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2412		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2413
2414	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2415		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2416
2417	if (of_get_property(np, "cd-inverted", NULL))
2418		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
2419
2420	return pdata;
2421}
2422
2423#else /* CONFIG_OF */
2424static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2425{
2426	return ERR_PTR(-EINVAL);
2427}
2428#endif /* CONFIG_OF */
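/*
 * Example (sketch) of the controller-level properties dw_mci_parse_dt()
 * understands; all values are illustrative only:
 *
 *	num-slots = <1>;
 *	fifo-depth = <0x80>;
 *	card-detect-delay = <200>;
 *	clock-frequency = <400000000>;
 *	broken-cd;
 *	supports-highspeed;
 */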
2429
2430int dw_mci_probe(struct dw_mci *host)
2431{
2432	const struct dw_mci_drv_data *drv_data = host->drv_data;
2433	int width, i, ret = 0;
2434	u32 fifo_size;
2435	int init_slots = 0;
2436
2437	if (!host->pdata) {
2438		host->pdata = dw_mci_parse_dt(host);
2439		if (IS_ERR(host->pdata)) {
2440			dev_err(host->dev, "platform data not available\n");
2441			return -EINVAL;
2442		}
2443	}
2444
2445	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2446		dev_err(host->dev,
2447			"Platform data must supply select_slot function\n");
2448		return -ENODEV;
2449	}
2450
2451	host->biu_clk = devm_clk_get(host->dev, "biu");
2452	if (IS_ERR(host->biu_clk)) {
2453		dev_dbg(host->dev, "biu clock not available\n");
2454	} else {
2455		ret = clk_prepare_enable(host->biu_clk);
2456		if (ret) {
2457			dev_err(host->dev, "failed to enable biu clock\n");
2458			return ret;
2459		}
2460	}
2461
2462	host->ciu_clk = devm_clk_get(host->dev, "ciu");
2463	if (IS_ERR(host->ciu_clk)) {
2464		dev_dbg(host->dev, "ciu clock not available\n");
2465		host->bus_hz = host->pdata->bus_hz;
2466	} else {
2467		ret = clk_prepare_enable(host->ciu_clk);
2468		if (ret) {
2469			dev_err(host->dev, "failed to enable ciu clock\n");
2470			goto err_clk_biu;
2471		}
2472
2473		if (host->pdata->bus_hz) {
2474			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2475			if (ret)
2476				dev_warn(host->dev,
2477					 "Unable to set bus rate to %uHz\n",
2478					 host->pdata->bus_hz);
2479		}
2480		host->bus_hz = clk_get_rate(host->ciu_clk);
2481	}
2482
2483	if (drv_data && drv_data->init) {
2484		ret = drv_data->init(host);
2485		if (ret) {
2486			dev_err(host->dev,
2487				"implementation specific init failed\n");
2488			goto err_clk_ciu;
2489		}
2490	}
2491
2492	if (drv_data && drv_data->setup_clock) {
2493		ret = drv_data->setup_clock(host);
2494		if (ret) {
2495			dev_err(host->dev,
2496				"implementation specific clock setup failed\n");
2497			goto err_clk_ciu;
2498		}
2499	}
2500
2501	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2502	if (IS_ERR(host->vmmc)) {
2503		ret = PTR_ERR(host->vmmc);
2504		if (ret == -EPROBE_DEFER)
2505			goto err_clk_ciu;
2506
2507		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2508		host->vmmc = NULL;
2509	} else {
2510		ret = regulator_enable(host->vmmc);
2511		if (ret) {
2512			if (ret != -EPROBE_DEFER)
2513				dev_err(host->dev,
2514					"regulator_enable fail: %d\n", ret);
2515			goto err_clk_ciu;
2516		}
2517	}
2518
2519	if (!host->bus_hz) {
2520		dev_err(host->dev,
2521			"Platform data must supply bus speed\n");
2522		ret = -ENODEV;
2523		goto err_regulator;
2524	}
2525
2526	host->quirks = host->pdata->quirks;
2527
2528	spin_lock_init(&host->lock);
2529	INIT_LIST_HEAD(&host->queue);
2530
2531	/*
2532	 * Get the host data width - this assumes that HCON has been set with
2533	 * the correct values.
2534	 */
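	/*
	 * HCON[9:7] (H_DATA_WIDTH) encodes the host data width the core
	 * was synthesized with: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit; any
	 * other value is reserved and handled as 32-bit below.
	 */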
2535	i = (mci_readl(host, HCON) >> 7) & 0x7;
2536	if (!i) {
2537		host->push_data = dw_mci_push_data16;
2538		host->pull_data = dw_mci_pull_data16;
2539		width = 16;
2540		host->data_shift = 1;
2541	} else if (i == 2) {
2542		host->push_data = dw_mci_push_data64;
2543		host->pull_data = dw_mci_pull_data64;
2544		width = 64;
2545		host->data_shift = 3;
2546	} else {
2547		/* Check for a reserved value, and warn if it is */
2548		WARN((i != 1),
2549		     "HCON reports a reserved host data width!\n"
2550		     "Defaulting to 32-bit access.\n");
2551		host->push_data = dw_mci_push_data32;
2552		host->pull_data = dw_mci_pull_data32;
2553		width = 32;
2554		host->data_shift = 2;
2555	}
2556
2557	/* Reset all blocks */
2558	if (!dw_mci_ctrl_all_reset(host))
2559		return -ENODEV;
2560
2561	host->dma_ops = host->pdata->dma_ops;
2562	dw_mci_init_dma(host);
2563
2564	/* Clear the interrupts for the host controller */
2565	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2566	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2567
2568	/* Put in max timeout */
2569	mci_writel(host, TMOUT, 0xFFFFFFFF);
2570
2571	/*
2572	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
2573	 * TX_WMark = fifo_size / 2, DMA multiple-transaction size = 8.
2574	 */
2575	if (!host->pdata->fifo_depth) {
2576		/*
2577		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2578		 * have been overwritten by the bootloader, just like we're
2579		 * about to do, so if you know the value for your hardware, you
2580		 * should put it in the platform data.
2581		 */
2582		fifo_size = mci_readl(host, FIFOTH);
2583		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2584	} else {
2585		fifo_size = host->pdata->fifo_depth;
2586	}
2587	host->fifo_depth = fifo_size;
2588	host->fifoth_val =
2589		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2590	mci_writel(host, FIFOTH, host->fifoth_val);
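	/*
	 * Worked example, assuming a 64-deep FIFO: RX_WMark = 31,
	 * TX_WMark = 32 and MSIZE = 0x2, i.e. DMA bursts of 8 transfers,
	 * per the RX_WMark/TX_WMark formula above.
	 */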
2591
2592	/* disable clock to CIU */
2593	mci_writel(host, CLKENA, 0);
2594	mci_writel(host, CLKSRC, 0);
2595
2596	/*
2597	 * The 2.40a spec changed the offset of the DATA register.
2598	 * Check the version ID and set the data offset accordingly.
2599	 */
2600	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2601	dev_info(host->dev, "Version ID is %04x\n", host->verid);
2602
2603	if (host->verid < DW_MMC_240A)
2604		host->data_offset = DATA_OFFSET;
2605	else
2606		host->data_offset = DATA_240A_OFFSET;
2607
2608	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
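	/*
	 * WQ_MEM_RECLAIM gives the queue a rescuer thread, so card-detect
	 * work can still make forward progress under memory pressure.
	 */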
2609	host->card_workqueue = alloc_workqueue("dw-mci-card",
2610			WQ_MEM_RECLAIM, 1);
2611	if (!host->card_workqueue) {
2612		ret = -ENOMEM;
2613		goto err_dmaunmap;
2614	}
2615	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2616	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2617			       host->irq_flags, "dw-mci", host);
2618	if (ret)
2619		goto err_workqueue;
2620
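	/*
	 * HCON bits [5:1] hold the number of card slots the controller
	 * was synthesized with, minus one.
	 */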
2621	if (host->pdata->num_slots)
2622		host->num_slots = host->pdata->num_slots;
2623	else
2624		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2625
2626	/*
2627	 * Enable interrupts for command done, data over, data empty, card
2628	 * detect, receive ready, and errors such as transmit/receive timeout and CRC.
2629	 */
2630	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2631	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2632		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2633		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2634	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2635
2636	dev_info(host->dev,
2637		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
2638		 host->irq, width, fifo_size);
2640
2641	/* We need at least one slot to succeed */
2642	for (i = 0; i < host->num_slots; i++) {
2643		ret = dw_mci_init_slot(host, i);
2644		if (ret)
2645			dev_dbg(host->dev, "slot %d init failed\n", i);
2646		else
2647			init_slots++;
2648	}
2649
2650	if (init_slots) {
2651		dev_info(host->dev, "%d slots initialized\n", init_slots);
2652	} else {
2653		dev_dbg(host->dev, "attempted to initialize %d slots, but failed on all\n",
2654			host->num_slots);
2655		goto err_workqueue;
2656	}
2657
2658	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2659		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2660
2661	return 0;
2662
2663err_workqueue:
2664	destroy_workqueue(host->card_workqueue);
2665
2666err_dmaunmap:
2667	if (host->use_dma && host->dma_ops->exit)
2668		host->dma_ops->exit(host);
2669
2670err_regulator:
2671	if (host->vmmc)
2672		regulator_disable(host->vmmc);
2673
2674err_clk_ciu:
2675	if (!IS_ERR(host->ciu_clk))
2676		clk_disable_unprepare(host->ciu_clk);
2677
2678err_clk_biu:
2679	if (!IS_ERR(host->biu_clk))
2680		clk_disable_unprepare(host->biu_clk);
2681
2682	return ret;
2683}
2684EXPORT_SYMBOL(dw_mci_probe);
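/*
 * dw_mci_probe() is called by a bus glue driver once the struct dw_mci
 * has its regs, irq and dev fields filled in.  A minimal platform-glue
 * sketch follows; the probe function and its wiring are illustrative,
 * not part of this file:
 *
 *	static int my_dw_mmc_probe(struct platform_device *pdev)
 *	{
 *		struct dw_mci *host;
 *		struct resource *regs;
 *
 *		host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		host->irq = platform_get_irq(pdev, 0);
 *		if (host->irq < 0)
 *			return host->irq;
 *
 *		host->dev = &pdev->dev;
 *		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		host->regs = devm_ioremap_resource(&pdev->dev, regs);
 *		if (IS_ERR(host->regs))
 *			return PTR_ERR(host->regs);
 *
 *		platform_set_drvdata(pdev, host);
 *		return dw_mci_probe(host);
 *	}
 */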
2685
2686void dw_mci_remove(struct dw_mci *host)
2687{
2688	int i;
2689
2690	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2691	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2692
2693	for (i = 0; i < host->num_slots; i++) {
2694		dev_dbg(host->dev, "remove slot %d\n", i);
2695		if (host->slot[i])
2696			dw_mci_cleanup_slot(host->slot[i], i);
2697	}
2698
2699	/* disable clock to CIU */
2700	mci_writel(host, CLKENA, 0);
2701	mci_writel(host, CLKSRC, 0);
2702
2703	destroy_workqueue(host->card_workqueue);
2704
2705	if (host->use_dma && host->dma_ops->exit)
2706		host->dma_ops->exit(host);
2707
2708	if (host->vmmc)
2709		regulator_disable(host->vmmc);
2710
2711	if (!IS_ERR(host->ciu_clk))
2712		clk_disable_unprepare(host->ciu_clk);
2713
2714	if (!IS_ERR(host->biu_clk))
2715		clk_disable_unprepare(host->biu_clk);
2716}
2717EXPORT_SYMBOL(dw_mci_remove);
2718
2719
2720
2721#ifdef CONFIG_PM_SLEEP
2722/*
2723 * TODO: we should probably disable the clock to the card in the suspend path.
2724 */
2725int dw_mci_suspend(struct dw_mci *host)
2726{
2727	if (host->vmmc)
2728		regulator_disable(host->vmmc);
2729
2730	return 0;
2731}
2732EXPORT_SYMBOL(dw_mci_suspend);
2733
2734int dw_mci_resume(struct dw_mci *host)
2735{
2736	int i, ret;
2737
2738	if (host->vmmc) {
2739		ret = regulator_enable(host->vmmc);
2740		if (ret) {
2741			dev_err(host->dev,
2742				"failed to enable regulator: %d\n", ret);
2743			return ret;
2744		}
2745	}
2746
2747	if (!dw_mci_ctrl_all_reset(host)) {
2748		return -ENODEV;
2750	}
2751
2752	if (host->use_dma && host->dma_ops->init)
2753		host->dma_ops->init(host);
2754
2755	/*
2756	 * Restore the initial value of the FIFOTH register,
2757	 * and invalidate prev_blksz by zeroing it.
2758	 */
2759	mci_writel(host, FIFOTH, host->fifoth_val);
2760	host->prev_blksz = 0;
2761
2762	/* Put in max timeout */
2763	mci_writel(host, TMOUT, 0xFFFFFFFF);
2764
2765	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2766	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2767		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2768		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2769	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2770
2771	for (i = 0; i < host->num_slots; i++) {
2772		struct dw_mci_slot *slot = host->slot[i];
2773		if (!slot)
2774			continue;
2775		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2776			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2777			dw_mci_setup_bus(slot, true);
2778		}
2779	}
2780	return 0;
2781}
2782EXPORT_SYMBOL(dw_mci_resume);
2783#endif /* CONFIG_PM_SLEEP */
2784
2785static int __init dw_mci_init(void)
2786{
2787	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
2788	return 0;
2789}
2790
2791static void __exit dw_mci_exit(void)
2792{
2793}
2794
2795module_init(dw_mci_init);
2796module_exit(dw_mci_exit);
2797
2798MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2799MODULE_AUTHOR("NXP Semiconductor VietNam");
2800MODULE_AUTHOR("Imagination Technologies Ltd");
2801MODULE_LICENSE("GPL v2");