/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: Hz */
#define DW_MCI_FREQ_MIN	400000		/* unit: Hz */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | \
	 cpu_to_le32((s) & 0x1fff))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000
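
/*
 * Illustrative note on the size fields above: in des1, bits [12:0] hold
 * the buffer 1 size and bits [25:13] the buffer 2 size, so
 * IDMAC_SET_BUFFER1_SIZE() writes only the low 13 bits (mask 0x1fff) and
 * preserves the buffer 2 field (mask 0x03ffe000).  A 13-bit field caps a
 * buffer at 8191 bytes; DW_MCI_DESC_DATA_LENGTH (4 KiB) stays comfortably
 * below that cap.
 */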

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	/* Dump live register contents; the SDMMC_* macros are only offsets */
	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

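/*
 * Worked example (for illustration only): a single-block read (CMD17)
 * with its short CRC-protected R1 response comes out of
 * dw_mci_prepare_command() below as
 *
 *   cmdr = 17 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
 *          SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP | SDMMC_CMD_USE_HOLD_REG;
 *
 * assuming the slot has not set DW_MMC_CARD_NO_USE_HOLD.
 */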
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

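/*
 * For an SDIO CMD53 transfer, the abort prepared below is a CMD52 write to
 * the CCCR ABORT register.  In the standard CMD52 argument layout: bit 31
 * selects a write, bits 30:28 address function 0 (where the CCCR lives),
 * bits 25:9 carry the register address (SDIO_CCCR_ABORT), and the low byte
 * carries the data, here the number of the function being aborted,
 * recovered from bits 30:28 of the original CMD53 argument.
 */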
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy.  Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
			if (time_after(jiffies, timeout)) {
				/* Command will fail; we'll pass error then */
				dev_err(host->dev, "Busy; trying anyway\n");
				break;
			}
			udelay(10);
		}
	}
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

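/*
 * Example of how the fan-out below plays out: a single 10 KiB scatterlist
 * entry becomes three descriptors of 4096, 4096 and 2048 bytes.  Every
 * descriptor is created with OWN|DIC|CH set; afterwards the first one
 * additionally gets FD, and the last one gets LD with CH and DIC cleared,
 * so only the final segment raises a completion interrupt.
 */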
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	unsigned int desc_len;
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u64 mem_addr = sg_dma_address(&data->sg[i]);

			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
							IDMAC_DES0_CH;

				/* Buffer length */
				IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des4 = mem_addr & 0xffffffff;
				desc->des5 = mem_addr >> 32;

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc_last->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u32 mem_addr = sg_dma_address(&data->sg[i]);

			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
							 IDMAC_DES0_DIC |
							 IDMAC_DES0_CH);

				/* Buffer length */
				IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des2 = cpu_to_le32(mem_addr);

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

		/* Set last descriptor */
		desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					       IDMAC_DES0_DIC));
		desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
	}

	wmb(); /* drain writebuffer */
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

	return 0;
}

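/*
 * The descriptor ring set up below occupies one page.  Assuming 4 KiB
 * pages, that is 4096 / 32 = 128 descriptors in the 64-bit address layout
 * (eight u32 fields per descriptor) or 4096 / 16 = 256 descriptors in the
 * 32-bit layout.  The last descriptor links back to the base address and
 * carries IDMAC_DES0_ER to mark the end of the ring.
 */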
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_all(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
	if (!host->dms->ch) {
		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return -ENXIO;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

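/*
 * host_cookie protocol used below: dw_mci_pre_req() maps the scatterlist
 * ahead of time and caches the mapped length in data->host_cookie; when
 * the request is actually issued, dw_mci_pre_dma_transfer() returns that
 * cached value instead of mapping again, and dw_mci_post_req() unmaps.
 * A cookie of zero means "not pre-mapped".
 */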
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

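/*
 * Worked example (illustrative numbers): with a 64-word FIFO, a 32-bit
 * FIFO width (fifo_width = 4, i.e. data_shift = 2) and blksz = 512,
 * blksz_depth is 128 and both watermarks start at 32.  The loop below
 * settles on mszs[4] = 32, the largest burst dividing both 128 and 32,
 * so FIFOTH gets a burst size index of 4, an RX watermark of 31 and a
 * TX watermark of 32.
 */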
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset
	 * is in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A)
		return;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_MMC_HS400 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), thld_size should be <= blksz.
	 * If (blksz_depth) <  (fifo_depth >> 1), thld_size should be = blksz.
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		/* We can't do DMA */
		dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next data to be issued may be transferred by DMA,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

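/*
 * Divider example (illustrative numbers): with bus_hz = 50 MHz and a
 * requested clock of 400 kHz, div = 50000000 / 400000 = 125 (no remainder,
 * so no rounding up), then DIV_ROUND_UP(125, 2) = 63 is written to CLKDIV.
 * The controller divides by 2 * CLKDIV, matching the (bus_hz / div) >> 1
 * in the dev_info() below, so the card sees 50 MHz / 126, roughly
 * 396.8 kHz, always at or below the requested rate, never above it.
 */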
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the clock value, adjusted for the divider, to detect changes */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (!IS_ERR_VALUE(gpio_ro))
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
	    (mmc->caps & MMC_CAP_NONREMOVABLE))
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status == DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status == DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

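/*
 * Example conversion: __dw_mci_start_request() programs TMOUT to
 * 0xFFFFFFFF, so the data timeout field (bits 31:8) reads 0xFFFFFF.  At
 * bus_hz = 50 MHz that is DIV_ROUND_UP(16777215, 50000) = 336 ms, so the
 * software timer below would fire after roughly 346 ms including the
 * 10 ms of slack.
 */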
static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_ms;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);

	/* add a little spare time */
	drto_ms += 10;

	mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
}

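/*
 * The tasklet below drives the request state machine.  The usual path for
 * a data command is SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP (for open-ended or failed transfers) -> back to IDLE via
 * dw_mci_request_end(); data errors detour through STATE_DATA_ERROR,
 * which waits for EVENT_XFER_COMPLETE before falling back into
 * STATE_DATA_BUSY.
 */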
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all the data-related interrupts don't
				 * arrive within the given time while reading,
				 * arm the software data read timeout.
				 */
				if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
				    (host->dir_status == DW_MCI_RECV_STATUS))
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events)) {
				/*
				 * If the data error interrupt came but the
				 * data over interrupt doesn't arrive within
				 * the given time while reading, arm the
				 * software data read timeout.
				 */
				if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
				    (host->dir_status == DW_MCI_RECV_STATUS))
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, a stop-abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}

1930/* push final bytes to part_buf, only use during push */
1931static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1932{
1933	memcpy((void *)&host->part_buf, buf, cnt);
1934	host->part_buf_count = cnt;
1935}
1936
1937/* append bytes to part_buf, only use during push */
1938static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1939{
1940	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1941	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1942	host->part_buf_count += cnt;
1943	return cnt;
1944}
1945
1946/* pull first bytes from part_buf, only use during pull */
1947static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1948{
1949	cnt = min_t(int, cnt, host->part_buf_count);
1950	if (cnt) {
1951		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1952		       cnt);
1953		host->part_buf_count -= cnt;
1954		host->part_buf_start += cnt;
1955	}
1956	return cnt;
1957}
1958
1959/* pull final bytes from the part_buf, assuming it's just been filled */
1960static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1961{
1962	memcpy(buf, &host->part_buf, cnt);
1963	host->part_buf_start = cnt;
1964	host->part_buf_count = (1 << host->data_shift) - cnt;
1965}
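
/*
 * A note on the part_buf staging used by the helpers above: the FIFO is
 * only accessed in whole words of (1 << host->data_shift) bytes, so any
 * straggler bytes are parked in host->part_buf. For example, with a
 * 32-bit FIFO (data_shift == 2), pushing 7 bytes writes one full word
 * and leaves 3 bytes in part_buf for the next call.
 */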
1966
1967static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1968{
1969	struct mmc_data *data = host->data;
1970	int init_cnt = cnt;
1971
1972	/* try and push anything in the part_buf */
1973	if (unlikely(host->part_buf_count)) {
1974		int len = dw_mci_push_part_bytes(host, buf, cnt);
1975
1976		buf += len;
1977		cnt -= len;
1978		if (host->part_buf_count == 2) {
1979			mci_fifo_writew(host->fifo_reg, host->part_buf16);
1980			host->part_buf_count = 0;
1981		}
1982	}
1983#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1984	if (unlikely((unsigned long)buf & 0x1)) {
1985		while (cnt >= 2) {
1986			u16 aligned_buf[64];
1987			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1988			int items = len >> 1;
1989			int i;
1990			/* memcpy from input buffer into aligned buffer */
1991			memcpy(aligned_buf, buf, len);
1992			buf += len;
1993			cnt -= len;
1994			/* push data from aligned buffer into fifo */
1995			for (i = 0; i < items; ++i)
1996				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
1997		}
1998	} else
1999#endif
2000	{
2001		u16 *pdata = buf;
2002
2003		for (; cnt >= 2; cnt -= 2)
2004			mci_fifo_writew(host->fifo_reg, *pdata++);
2005		buf = pdata;
2006	}
2007	/* put anything remaining in the part_buf */
2008	if (cnt) {
2009		dw_mci_set_part_bytes(host, buf, cnt);
2010		/* Push data if we have reached the expected data length */
2011		if ((data->bytes_xfered + init_cnt) ==
2012		    (data->blksz * data->blocks))
2013			mci_fifo_writew(host->fifo_reg, host->part_buf16);
2014	}
2015}
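
/*
 * A note on the #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS path
 * above: when buf sits at an odd address on such architectures, data is
 * first memcpy()'d into the on-stack aligned_buf[] and only then pushed
 * to the FIFO, trading an extra copy for safe, aligned 16-bit accesses.
 * The 32- and 64-bit variants below follow the same pattern.
 */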
2016
2017static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2018{
2019#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2020	if (unlikely((unsigned long)buf & 0x1)) {
2021		while (cnt >= 2) {
2022			/* pull data from fifo into aligned buffer */
2023			u16 aligned_buf[64];
2024			int len = min(cnt & -2, (int)sizeof(aligned_buf));
2025			int items = len >> 1;
2026			int i;
2027
2028			for (i = 0; i < items; ++i)
2029				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2030			/* memcpy from aligned buffer into output buffer */
2031			memcpy(buf, aligned_buf, len);
2032			buf += len;
2033			cnt -= len;
2034		}
2035	} else
2036#endif
2037	{
2038		u16 *pdata = buf;
2039
2040		for (; cnt >= 2; cnt -= 2)
2041			*pdata++ = mci_fifo_readw(host->fifo_reg);
2042		buf = pdata;
2043	}
2044	if (cnt) {
2045		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2046		dw_mci_pull_final_bytes(host, buf, cnt);
2047	}
2048}
2049
2050static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2051{
2052	struct mmc_data *data = host->data;
2053	int init_cnt = cnt;
2054
2055	/* try and push anything in the part_buf */
2056	if (unlikely(host->part_buf_count)) {
2057		int len = dw_mci_push_part_bytes(host, buf, cnt);
2058
2059		buf += len;
2060		cnt -= len;
2061		if (host->part_buf_count == 4) {
2062			mci_fifo_writel(host->fifo_reg,	host->part_buf32);
2063			host->part_buf_count = 0;
2064		}
2065	}
2066#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2067	if (unlikely((unsigned long)buf & 0x3)) {
2068		while (cnt >= 4) {
2069			u32 aligned_buf[32];
2070			int len = min(cnt & -4, (int)sizeof(aligned_buf));
2071			int items = len >> 2;
2072			int i;
2073			/* memcpy from input buffer into aligned buffer */
2074			memcpy(aligned_buf, buf, len);
2075			buf += len;
2076			cnt -= len;
2077			/* push data from aligned buffer into fifo */
2078			for (i = 0; i < items; ++i)
2079				mci_fifo_writel(host->fifo_reg,	aligned_buf[i]);
2080		}
2081	} else
2082#endif
2083	{
2084		u32 *pdata = buf;
2085
2086		for (; cnt >= 4; cnt -= 4)
2087			mci_fifo_writel(host->fifo_reg, *pdata++);
2088		buf = pdata;
2089	}
2090	/* put anything remaining in the part_buf */
2091	if (cnt) {
2092		dw_mci_set_part_bytes(host, buf, cnt);
2093		/* Push data if we have reached the expected data length */
2094		if ((data->bytes_xfered + init_cnt) ==
2095		    (data->blksz * data->blocks))
2096			mci_fifo_writel(host->fifo_reg, host->part_buf32);
2097	}
2098}
2099
2100static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2101{
2102#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2103	if (unlikely((unsigned long)buf & 0x3)) {
2104		while (cnt >= 4) {
2105			/* pull data from fifo into aligned buffer */
2106			u32 aligned_buf[32];
2107			int len = min(cnt & -4, (int)sizeof(aligned_buf));
2108			int items = len >> 2;
2109			int i;
2110
2111			for (i = 0; i < items; ++i)
2112				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2113			/* memcpy from aligned buffer into output buffer */
2114			memcpy(buf, aligned_buf, len);
2115			buf += len;
2116			cnt -= len;
2117		}
2118	} else
2119#endif
2120	{
2121		u32 *pdata = buf;
2122
2123		for (; cnt >= 4; cnt -= 4)
2124			*pdata++ = mci_fifo_readl(host->fifo_reg);
2125		buf = pdata;
2126	}
2127	if (cnt) {
2128		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2129		dw_mci_pull_final_bytes(host, buf, cnt);
2130	}
2131}
2132
2133static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2134{
2135	struct mmc_data *data = host->data;
2136	int init_cnt = cnt;
2137
2138	/* try and push anything in the part_buf */
2139	if (unlikely(host->part_buf_count)) {
2140		int len = dw_mci_push_part_bytes(host, buf, cnt);
2141
2142		buf += len;
2143		cnt -= len;
2144
2145		if (host->part_buf_count == 8) {
2146			mci_fifo_writeq(host->fifo_reg,	host->part_buf);
2147			host->part_buf_count = 0;
2148		}
2149	}
2150#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2151	if (unlikely((unsigned long)buf & 0x7)) {
2152		while (cnt >= 8) {
2153			u64 aligned_buf[16];
2154			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2155			int items = len >> 3;
2156			int i;
2157			/* memcpy from input buffer into aligned buffer */
2158			memcpy(aligned_buf, buf, len);
2159			buf += len;
2160			cnt -= len;
2161			/* push data from aligned buffer into fifo */
2162			for (i = 0; i < items; ++i)
2163				mci_fifo_writeq(host->fifo_reg,	aligned_buf[i]);
2164		}
2165	} else
2166#endif
2167	{
2168		u64 *pdata = buf;
2169
2170		for (; cnt >= 8; cnt -= 8)
2171			mci_fifo_writeq(host->fifo_reg, *pdata++);
2172		buf = pdata;
2173	}
2174	/* put anything remaining in the part_buf */
2175	if (cnt) {
2176		dw_mci_set_part_bytes(host, buf, cnt);
2177		/* Push data if we have reached the expected data length */
2178		if ((data->bytes_xfered + init_cnt) ==
2179		    (data->blksz * data->blocks))
2180			mci_fifo_writeq(host->fifo_reg, host->part_buf);
2181	}
2182}
2183
2184static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2185{
2186#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2187	if (unlikely((unsigned long)buf & 0x7)) {
2188		while (cnt >= 8) {
2189			/* pull data from fifo into aligned buffer */
2190			u64 aligned_buf[16];
2191			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2192			int items = len >> 3;
2193			int i;
2194
2195			for (i = 0; i < items; ++i)
2196				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2197
2198			/* memcpy from aligned buffer into output buffer */
2199			memcpy(buf, aligned_buf, len);
2200			buf += len;
2201			cnt -= len;
2202		}
2203	} else
2204#endif
2205	{
2206		u64 *pdata = buf;
2207
2208		for (; cnt >= 8; cnt -= 8)
2209			*pdata++ = mci_fifo_readq(host->fifo_reg);
2210		buf = pdata;
2211	}
2212	if (cnt) {
2213		host->part_buf = mci_fifo_readq(host->fifo_reg);
2214		dw_mci_pull_final_bytes(host, buf, cnt);
2215	}
2216}
2217
2218static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2219{
2220	int len;
2221
2222	/* get remaining partial bytes */
2223	len = dw_mci_pull_part_bytes(host, buf, cnt);
2224	if (unlikely(len == cnt))
2225		return;
2226	buf += len;
2227	cnt -= len;
2228
2229	/* get the rest of the data */
2230	host->pull_data(host, buf, cnt);
2231}
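
/*
 * dw_mci_pull_data() drains whatever is left in part_buf first and then
 * delegates to the width-specific host->pull_data hook (one of
 * dw_mci_pull_data16/32/64, chosen at probe time from HCON's host data
 * width field).
 */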
2232
2233static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2234{
2235	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2236	void *buf;
2237	unsigned int offset;
2238	struct mmc_data	*data = host->data;
2239	int shift = host->data_shift;
2240	u32 status;
2241	unsigned int len;
2242	unsigned int remain, fcnt;
2243
2244	do {
2245		if (!sg_miter_next(sg_miter))
2246			goto done;
2247
2248		host->sg = sg_miter->piter.sg;
2249		buf = sg_miter->addr;
2250		remain = sg_miter->length;
2251		offset = 0;
2252
2253		do {
2254			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2255					<< shift) + host->part_buf_count;
2256			len = min(remain, fcnt);
2257			if (!len)
2258				break;
2259			dw_mci_pull_data(host, (void *)(buf + offset), len);
2260			data->bytes_xfered += len;
2261			offset += len;
2262			remain -= len;
2263		} while (remain);
2264
2265		sg_miter->consumed = offset;
2266		status = mci_readl(host, MINTSTS);
2267		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2268	/* if RXDR is ready, read again */
2269	} while ((status & SDMMC_INT_RXDR) ||
2270		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2271
2272	if (!remain) {
2273		if (!sg_miter_next(sg_miter))
2274			goto done;
2275		sg_miter->consumed = 0;
2276	}
2277	sg_miter_stop(sg_miter);
2278	return;
2279
2280done:
2281	sg_miter_stop(sg_miter);
2282	host->sg = NULL;
2283	smp_wmb(); /* drain writebuffer */
2284	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2285}
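
/*
 * A worked example of the fcnt arithmetic in the read loop above:
 * SDMMC_GET_FCNT() reports the FIFO fill level in words, so with a
 * 32-bit FIFO (shift == 2), a fill level of 5 words plus 1 byte already
 * staged in part_buf gives fcnt = (5 << 2) + 1 = 21 readable bytes.
 */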
2286
2287static void dw_mci_write_data_pio(struct dw_mci *host)
2288{
2289	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2290	void *buf;
2291	unsigned int offset;
2292	struct mmc_data	*data = host->data;
2293	int shift = host->data_shift;
2294	u32 status;
2295	unsigned int len;
2296	unsigned int fifo_depth = host->fifo_depth;
2297	unsigned int remain, fcnt;
2298
2299	do {
2300		if (!sg_miter_next(sg_miter))
2301			goto done;
2302
2303		host->sg = sg_miter->piter.sg;
2304		buf = sg_miter->addr;
2305		remain = sg_miter->length;
2306		offset = 0;
2307
2308		do {
2309			fcnt = ((fifo_depth -
2310				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2311					<< shift) - host->part_buf_count;
2312			len = min(remain, fcnt);
2313			if (!len)
2314				break;
2315			host->push_data(host, (void *)(buf + offset), len);
2316			data->bytes_xfered += len;
2317			offset += len;
2318			remain -= len;
2319		} while (remain);
2320
2321		sg_miter->consumed = offset;
2322		status = mci_readl(host, MINTSTS);
2323		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2324	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2325
2326	if (!remain) {
2327		if (!sg_miter_next(sg_miter))
2328			goto done;
2329		sg_miter->consumed = 0;
2330	}
2331	sg_miter_stop(sg_miter);
2332	return;
2333
2334done:
2335	sg_miter_stop(sg_miter);
2336	host->sg = NULL;
2337	smp_wmb(); /* drain writebuffer */
2338	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2339}
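
/*
 * The write-side mirror of the read path: the free space in bytes is
 * (fifo_depth - fill_level) << shift, minus any bytes already staged in
 * part_buf. E.g. a 32-word deep, 32-bit wide FIFO holding 8 words
 * leaves (32 - 8) << 2 = 96 bytes of room for host->push_data().
 */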
2340
2341static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2342{
2343	if (!host->cmd_status)
2344		host->cmd_status = status;
2345
2346	smp_wmb(); /* drain writebuffer */
2347
2348	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2349	tasklet_schedule(&host->tasklet);
2350}
2351
2352static void dw_mci_handle_cd(struct dw_mci *host)
2353{
2354	int i;
2355
2356	for (i = 0; i < host->num_slots; i++) {
2357		struct dw_mci_slot *slot = host->slot[i];
2358
2359		if (!slot)
2360			continue;
2361
2362		if (slot->mmc->ops->card_event)
2363			slot->mmc->ops->card_event(slot->mmc);
2364		mmc_detect_change(slot->mmc,
2365			msecs_to_jiffies(host->pdata->detect_delay_ms));
2366	}
2367}
2368
2369static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2370{
2371	struct dw_mci *host = dev_id;
2372	u32 pending;
2373	int i;
2374
2375	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2376
2377	if (pending) {
2378		/* Check volt switch first, since it can look like an error */
2379		if ((host->state == STATE_SENDING_CMD11) &&
2380		    (pending & SDMMC_INT_VOLT_SWITCH)) {
2381			unsigned long irqflags;
2382
2383			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2384			pending &= ~SDMMC_INT_VOLT_SWITCH;
2385
2386			/*
2387			 * Hold the lock; we know cmd11_timer can't be kicked
2388			 * off after the lock is released, so safe to delete.
2389			 */
2390			spin_lock_irqsave(&host->irq_lock, irqflags);
2391			dw_mci_cmd_interrupt(host, pending);
2392			spin_unlock_irqrestore(&host->irq_lock, irqflags);
2393
2394			del_timer(&host->cmd11_timer);
2395		}
2396
2397		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2398			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2399			host->cmd_status = pending;
2400			smp_wmb(); /* drain writebuffer */
2401			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2402		}
2403
2404		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2405			/* if there is an error, report DATA_ERROR */
2406			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2407			host->data_status = pending;
2408			smp_wmb(); /* drain writebuffer */
2409			set_bit(EVENT_DATA_ERROR, &host->pending_events);
2410			tasklet_schedule(&host->tasklet);
2411		}
2412
2413		if (pending & SDMMC_INT_DATA_OVER) {
2414			if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
2415				del_timer(&host->dto_timer);
2416
2417			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2418			if (!host->data_status)
2419				host->data_status = pending;
2420			smp_wmb(); /* drain writebuffer */
2421			if (host->dir_status == DW_MCI_RECV_STATUS) {
2422				if (host->sg != NULL)
2423					dw_mci_read_data_pio(host, true);
2424			}
2425			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2426			tasklet_schedule(&host->tasklet);
2427		}
2428
2429		if (pending & SDMMC_INT_RXDR) {
2430			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2431			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2432				dw_mci_read_data_pio(host, false);
2433		}
2434
2435		if (pending & SDMMC_INT_TXDR) {
2436			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2437			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2438				dw_mci_write_data_pio(host);
2439		}
2440
2441		if (pending & SDMMC_INT_CMD_DONE) {
2442			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2443			dw_mci_cmd_interrupt(host, pending);
2444		}
2445
2446		if (pending & SDMMC_INT_CD) {
2447			mci_writel(host, RINTSTS, SDMMC_INT_CD);
2448			dw_mci_handle_cd(host);
2449		}
2450
2451		/* Handle SDIO Interrupts */
2452		for (i = 0; i < host->num_slots; i++) {
2453			struct dw_mci_slot *slot = host->slot[i];
2454
2455			if (!slot)
2456				continue;
2457
2458			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2459				mci_writel(host, RINTSTS,
2460					   SDMMC_INT_SDIO(slot->sdio_id));
2461				mmc_signal_sdio_irq(slot->mmc);
2462			}
2463		}
2464
2465	}
2466
2467	if (host->use_dma != TRANS_MODE_IDMAC)
2468		return IRQ_HANDLED;
2469
2470	/* Handle IDMA interrupts */
2471	if (host->dma_64bit_address == 1) {
2472		pending = mci_readl(host, IDSTS64);
2473		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2474			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2475							SDMMC_IDMAC_INT_RI);
2476			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2477			host->dma_ops->complete((void *)host);
2478		}
2479	} else {
2480		pending = mci_readl(host, IDSTS);
2481		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2482			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2483							SDMMC_IDMAC_INT_RI);
2484			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2485			host->dma_ops->complete((void *)host);
2486		}
2487	}
2488
2489	return IRQ_HANDLED;
2490}
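
/*
 * A note on the interrupt bookkeeping above: MINTSTS reports the masked
 * interrupt status, and each handled source is acknowledged by writing
 * its bit back to RINTSTS (write-one-to-clear) before the tasklet is
 * scheduled to finish the work outside hard-IRQ context.
 */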
2491
2492#ifdef CONFIG_OF
2493/* given a slot, find out the device node representing that slot */
2494static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot)
2495{
2496	struct device *dev = slot->mmc->parent;
2497	struct device_node *np;
2498	const __be32 *addr;
2499	int len;
2500
2501	if (!dev || !dev->of_node)
2502		return NULL;
2503
2504	for_each_child_of_node(dev->of_node, np) {
2505		addr = of_get_property(np, "reg", &len);
2506		if (!addr || (len < sizeof(int)))
2507			continue;
2508		if (be32_to_cpup(addr) == slot->id)
2509			return np;
2510	}
2511	return NULL;
2512}
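
/*
 * The lookup above matches a child node's "reg" property against the
 * slot id. A hypothetical device-tree fragment that would match slot 0:
 *
 *	mmc@12200000 {
 *		slot@0 {
 *			reg = <0>;
 *		};
 *	};
 */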
2513
2514static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
2515{
2516	struct device_node *np = dw_mci_of_find_slot_node(slot);
2517
2518	if (!np)
2519		return;
2520
2521	if (of_property_read_bool(np, "disable-wp")) {
2522		slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
2523		dev_warn(slot->mmc->parent,
2524			"Slot quirk 'disable-wp' is deprecated\n");
2525	}
2526}
2527#else /* CONFIG_OF */
2528static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
2529{
2530}
2531#endif /* CONFIG_OF */
2532
2533static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2534{
2535	struct mmc_host *mmc;
2536	struct dw_mci_slot *slot;
2537	const struct dw_mci_drv_data *drv_data = host->drv_data;
2538	int ctrl_id, ret;
2539	u32 freq[2];
2540
2541	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2542	if (!mmc)
2543		return -ENOMEM;
2544
2545	slot = mmc_priv(mmc);
2546	slot->id = id;
2547	slot->sdio_id = host->sdio_id0 + id;
2548	slot->mmc = mmc;
2549	slot->host = host;
2550	host->slot[id] = slot;
2551
2552	mmc->ops = &dw_mci_ops;
2553	if (of_property_read_u32_array(host->dev->of_node,
2554				       "clock-freq-min-max", freq, 2)) {
2555		mmc->f_min = DW_MCI_FREQ_MIN;
2556		mmc->f_max = DW_MCI_FREQ_MAX;
2557	} else {
2558		mmc->f_min = freq[0];
2559		mmc->f_max = freq[1];
2560	}
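
	/*
	 * A hypothetical DT property for the read above:
	 * "clock-freq-min-max = <400000 200000000>;" would yield
	 * f_min = 400 kHz and f_max = 200 MHz; without it the
	 * DW_MCI_FREQ_MIN/MAX defaults apply.
	 */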
2561
2562	/* if there are external regulators, get them */
2563	ret = mmc_regulator_get_supply(mmc);
2564	if (ret == -EPROBE_DEFER)
2565		goto err_host_allocated;
2566
2567	if (!mmc->ocr_avail)
2568		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2569
2570	if (host->pdata->caps)
2571		mmc->caps = host->pdata->caps;
2572
2573	if (host->pdata->pm_caps)
2574		mmc->pm_caps = host->pdata->pm_caps;
2575
2576	if (host->dev->of_node) {
2577		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2578		if (ctrl_id < 0)
2579			ctrl_id = 0;
2580	} else {
2581		ctrl_id = to_platform_device(host->dev)->id;
2582	}
2583	if (drv_data && drv_data->caps)
2584		mmc->caps |= drv_data->caps[ctrl_id];
2585
2586	if (host->pdata->caps2)
2587		mmc->caps2 = host->pdata->caps2;
2588
2589	dw_mci_slot_of_parse(slot);
2590
2591	ret = mmc_of_parse(mmc);
2592	if (ret)
2593		goto err_host_allocated;
2594
2595	/* Useful defaults if platform data is unset. */
2596	if (host->use_dma == TRANS_MODE_IDMAC) {
2597		mmc->max_segs = host->ring_size;
2598		mmc->max_blk_size = 65536;
2599		mmc->max_seg_size = 0x1000;
2600		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2601		mmc->max_blk_count = mmc->max_req_size / 512;
2602	} else if (host->use_dma == TRANS_MODE_EDMAC) {
2603		mmc->max_segs = 64;
2604		mmc->max_blk_size = 65536;
2605		mmc->max_blk_count = 65535;
2606		mmc->max_req_size =
2607				mmc->max_blk_size * mmc->max_blk_count;
2608		mmc->max_seg_size = mmc->max_req_size;
2609	} else {
2610		/* TRANS_MODE_PIO */
2611		mmc->max_segs = 64;
2612		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2613		mmc->max_blk_count = 512;
2614		mmc->max_req_size = mmc->max_blk_size *
2615				    mmc->max_blk_count;
2616		mmc->max_seg_size = mmc->max_req_size;
2617	}
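
	/*
	 * Worked numbers for the IDMAC case above, assuming the usual
	 * one-page descriptor ring of 4 KiB / 16-byte descriptors
	 * (ring_size = 256): max_req_size = 0x1000 * 256 = 1 MiB and
	 * max_blk_count = 1 MiB / 512 = 2048.
	 */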
2618
2619	if (dw_mci_get_cd(mmc))
2620		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2621	else
2622		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2623
2624	ret = mmc_add_host(mmc);
2625	if (ret)
2626		goto err_host_allocated;
2627
2628#if defined(CONFIG_DEBUG_FS)
2629	dw_mci_init_debugfs(slot);
2630#endif
2631
2632	return 0;
2633
2634err_host_allocated:
2635	mmc_free_host(mmc);
2636	return ret;
2637}
2638
2639static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2640{
2641	/* Debugfs stuff is cleaned up by mmc core */
2642	mmc_remove_host(slot->mmc);
2643	slot->host->slot[id] = NULL;
2644	mmc_free_host(slot->mmc);
2645}
2646
2647static void dw_mci_init_dma(struct dw_mci *host)
2648{
2649	int addr_config;
2650	struct device *dev = host->dev;
2651	struct device_node *np = dev->of_node;
2652
2653	/*
2654	 * Check transfer mode from HCON[17:16].
2655	 * To clear up the ambiguous description in the dw_mmc databook:
2656	 * 2b'00: No DMA Interface -> actually means using the internal DMA block
2657	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
2658	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
2659	 * 2b'11: Non DW DMA Interface -> PIO only
2660	 * Compared to the DesignWare DMA Interface, the Generic DMA Interface
2661	 * has a simpler request/acknowledge handshake mechanism and both are
2662	 * regarded as external DMA masters by dw_mmc.
2663	 */
2664	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
2665	if (host->use_dma == DMA_INTERFACE_IDMA) {
2666		host->use_dma = TRANS_MODE_IDMAC;
2667	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
2668		   host->use_dma == DMA_INTERFACE_GDMA) {
2669		host->use_dma = TRANS_MODE_EDMAC;
2670	} else {
2671		goto no_dma;
2672	}
2673
2674	/* Determine which DMA interface to use */
2675	if (host->use_dma == TRANS_MODE_IDMAC) {
2676		/*
2677		 * Check the ADDR_CONFIG bit in HCON to find
2678		 * the IDMAC address bus width.
2679		 */
2680		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
2681
2682		if (addr_config == 1) {
2683			/* host supports IDMAC in 64-bit address mode */
2684			host->dma_64bit_address = 1;
2685			dev_info(host->dev,
2686				 "IDMAC supports 64-bit address mode.\n");
2687			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2688				dma_set_coherent_mask(host->dev,
2689						      DMA_BIT_MASK(64));
2690		} else {
2691			/* host supports IDMAC in 32-bit address mode */
2692			host->dma_64bit_address = 0;
2693			dev_info(host->dev,
2694				 "IDMAC supports 32-bit address mode.\n");
2695		}
2696
2697		/* Alloc memory for sg translation */
2698		host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2699						   &host->sg_dma, GFP_KERNEL);
2700		if (!host->sg_cpu) {
2701			dev_err(host->dev,
2702				"%s: could not alloc DMA memory\n",
2703				__func__);
2704			goto no_dma;
2705		}
2706
2707		host->dma_ops = &dw_mci_idmac_ops;
2708		dev_info(host->dev, "Using internal DMA controller.\n");
2709	} else {
2710		/* TRANS_MODE_EDMAC: check dma bindings again */
2711		if ((of_property_count_strings(np, "dma-names") < 0) ||
2712		    (!of_find_property(np, "dmas", NULL))) {
2713			goto no_dma;
2714		}
2715		host->dma_ops = &dw_mci_edmac_ops;
2716		dev_info(host->dev, "Using external DMA controller.\n");
2717	}
2718
2719	if (host->dma_ops->init && host->dma_ops->start &&
2720	    host->dma_ops->stop && host->dma_ops->cleanup) {
2721		if (host->dma_ops->init(host)) {
2722			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
2723				__func__);
2724			goto no_dma;
2725		}
2726	} else {
2727		dev_err(host->dev, "DMA initialization not found.\n");
2728		goto no_dma;
2729	}
2730
2731	return;
2732
2733no_dma:
2734	dev_info(host->dev, "Using PIO mode.\n");
2735	host->use_dma = TRANS_MODE_PIO;
2736}
2737
2738static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2739{
2740	unsigned long timeout = jiffies + msecs_to_jiffies(500);
2741	u32 ctrl;
2742
2743	ctrl = mci_readl(host, CTRL);
2744	ctrl |= reset;
2745	mci_writel(host, CTRL, ctrl);
2746
2747	/* wait till resets clear */
2748	do {
2749		ctrl = mci_readl(host, CTRL);
2750		if (!(ctrl & reset))
2751			return true;
2752	} while (time_before(jiffies, timeout));
2753
2754	dev_err(host->dev,
2755		"Timeout resetting block (ctrl reset %#x)\n",
2756		ctrl & reset);
2757
2758	return false;
2759}
2760
2761static bool dw_mci_reset(struct dw_mci *host)
2762{
2763	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
2764	bool ret = false;
2765
2766	/*
2767	 * Resetting generates a block interrupt, hence setting
2768	 * the scatter-gather pointer to NULL.
2769	 */
2770	if (host->sg) {
2771		sg_miter_stop(&host->sg_miter);
2772		host->sg = NULL;
2773	}
2774
2775	if (host->use_dma)
2776		flags |= SDMMC_CTRL_DMA_RESET;
2777
2778	if (dw_mci_ctrl_reset(host, flags)) {
2779		/*
2780		 * In all cases we clear the RAWINTS register to clear any
2781		 * interrupts.
2782		 */
2783		mci_writel(host, RINTSTS, 0xFFFFFFFF);
2784
2785		/* if using DMA, wait for dma_req to clear */
2786		if (host->use_dma) {
2787			unsigned long timeout = jiffies + msecs_to_jiffies(500);
2788			u32 status;
2789
2790			do {
2791				status = mci_readl(host, STATUS);
2792				if (!(status & SDMMC_STATUS_DMA_REQ))
2793					break;
2794				cpu_relax();
2795			} while (time_before(jiffies, timeout));
2796
2797			if (status & SDMMC_STATUS_DMA_REQ) {
2798				dev_err(host->dev,
2799					"%s: Timeout waiting for dma_req to clear during reset\n",
2800					__func__);
2801				goto ciu_out;
2802			}
2803
2804			/* when using DMA, we next reset the FIFO again */
2805			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
2806				goto ciu_out;
2807		}
2808	} else {
2809		/* if the controller reset bit did clear, then set clock regs */
2810		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
2811			dev_err(host->dev,
2812				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
2813				__func__);
2814			goto ciu_out;
2815		}
2816	}
2817
2818	if (host->use_dma == TRANS_MODE_IDMAC)
2819		/* It is also recommended that we reset and reprogram idmac */
2820		dw_mci_idmac_reset(host);
2821
2822	ret = true;
2823
2824ciu_out:
2825	/* After a CTRL reset we need to have the CIU set the clock registers */
2826	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
2827
2828	return ret;
2829}
2830
2831static void dw_mci_cmd11_timer(unsigned long arg)
2832{
2833	struct dw_mci *host = (struct dw_mci *)arg;
2834
2835	if (host->state != STATE_SENDING_CMD11) {
2836		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
2837		return;
2838	}
2839
2840	host->cmd_status = SDMMC_INT_RTO;
2841	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2842	tasklet_schedule(&host->tasklet);
2843}
2844
2845static void dw_mci_dto_timer(unsigned long arg)
2846{
2847	struct dw_mci *host = (struct dw_mci *)arg;
2848
2849	switch (host->state) {
2850	case STATE_SENDING_DATA:
2851	case STATE_DATA_BUSY:
2852		/*
2853		 * If the DTO interrupt does NOT come while in the sending
2854		 * data state, terminate the current transfer and report a
2855		 * data timeout to the core.
2856		 */
2857		host->data_status = SDMMC_INT_DRTO;
2858		set_bit(EVENT_DATA_ERROR, &host->pending_events);
2859		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2860		tasklet_schedule(&host->tasklet);
2861		break;
2862	default:
2863		break;
2864	}
2865}
2866
2867#ifdef CONFIG_OF
2868static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2869{
2870	struct dw_mci_board *pdata;
2871	struct device *dev = host->dev;
2872	struct device_node *np = dev->of_node;
2873	const struct dw_mci_drv_data *drv_data = host->drv_data;
2874	int ret;
2875	u32 clock_frequency;
2876
2877	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2878	if (!pdata)
2879		return ERR_PTR(-ENOMEM);
2880
2881	/* find out number of slots supported */
2882	of_property_read_u32(np, "num-slots", &pdata->num_slots);
2883
2884	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2885		dev_info(dev,
2886			 "fifo-depth property not found, using value of FIFOTH register as default\n");
2887
2888	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2889
2890	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2891		pdata->bus_hz = clock_frequency;
2892
2893	if (drv_data && drv_data->parse_dt) {
2894		ret = drv_data->parse_dt(host);
2895		if (ret)
2896			return ERR_PTR(ret);
2897	}
2898
2899	if (of_find_property(np, "supports-highspeed", NULL)) {
2900		dev_info(dev, "supports-highspeed property is deprecated.\n");
2901		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2902	}
2903
2904	return pdata;
2905}
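
/*
 * A hypothetical DT fragment exercising the properties parsed above:
 *
 *	mmc@12200000 {
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *	};
 */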
2906
2907#else /* CONFIG_OF */
2908static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2909{
2910	return ERR_PTR(-EINVAL);
2911}
2912#endif /* CONFIG_OF */
2913
2914static void dw_mci_enable_cd(struct dw_mci *host)
2915{
2916	unsigned long irqflags;
2917	u32 temp;
2918	int i;
2919	struct dw_mci_slot *slot;
2920
2921	/*
2922	 * No need for CD if all slots have a non-error GPIO,
2923	 * or if broken card detection (polling) is in use.
2924	 */
2925	for (i = 0; i < host->num_slots; i++) {
2926		slot = host->slot[i];
2927		if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
2928			return;
2929
2930		if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
2931			break;
2932	}
2933	if (i == host->num_slots)
2934		return;
2935
2936	spin_lock_irqsave(&host->irq_lock, irqflags);
2937	temp = mci_readl(host, INTMASK);
2938	temp  |= SDMMC_INT_CD;
2939	mci_writel(host, INTMASK, temp);
2940	spin_unlock_irqrestore(&host->irq_lock, irqflags);
2941}
2942
2943int dw_mci_probe(struct dw_mci *host)
2944{
2945	const struct dw_mci_drv_data *drv_data = host->drv_data;
2946	int width, i, ret = 0;
2947	u32 fifo_size;
2948	int init_slots = 0;
2949
2950	if (!host->pdata) {
2951		host->pdata = dw_mci_parse_dt(host);
2952		if (IS_ERR(host->pdata)) {
2953			dev_err(host->dev, "platform data not available\n");
2954			return -EINVAL;
2955		}
2956	}
2957
2958	host->biu_clk = devm_clk_get(host->dev, "biu");
2959	if (IS_ERR(host->biu_clk)) {
2960		dev_dbg(host->dev, "biu clock not available\n");
2961	} else {
2962		ret = clk_prepare_enable(host->biu_clk);
2963		if (ret) {
2964			dev_err(host->dev, "failed to enable biu clock\n");
2965			return ret;
2966		}
2967	}
2968
2969	host->ciu_clk = devm_clk_get(host->dev, "ciu");
2970	if (IS_ERR(host->ciu_clk)) {
2971		dev_dbg(host->dev, "ciu clock not available\n");
2972		host->bus_hz = host->pdata->bus_hz;
2973	} else {
2974		ret = clk_prepare_enable(host->ciu_clk);
2975		if (ret) {
2976			dev_err(host->dev, "failed to enable ciu clock\n");
2977			goto err_clk_biu;
2978		}
2979
2980		if (host->pdata->bus_hz) {
2981			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2982			if (ret)
2983				dev_warn(host->dev,
2984					 "Unable to set bus rate to %uHz\n",
2985					 host->pdata->bus_hz);
2986		}
2987		host->bus_hz = clk_get_rate(host->ciu_clk);
2988	}
2989
2990	if (!host->bus_hz) {
2991		dev_err(host->dev,
2992			"Platform data must supply bus speed\n");
2993		ret = -ENODEV;
2994		goto err_clk_ciu;
2995	}
2996
2997	if (drv_data && drv_data->init) {
2998		ret = drv_data->init(host);
2999		if (ret) {
3000			dev_err(host->dev,
3001				"implementation specific init failed\n");
3002			goto err_clk_ciu;
3003		}
3004	}
3005
3006	if (drv_data && drv_data->setup_clock) {
3007		ret = drv_data->setup_clock(host);
3008		if (ret) {
3009			dev_err(host->dev,
3010				"implementation specific clock setup failed\n");
3011			goto err_clk_ciu;
3012		}
3013	}
3014
3015	setup_timer(&host->cmd11_timer,
3016		    dw_mci_cmd11_timer, (unsigned long)host);
3017
3018	host->quirks = host->pdata->quirks;
3019
3020	if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
3021		setup_timer(&host->dto_timer,
3022			    dw_mci_dto_timer, (unsigned long)host);
3023
3024	spin_lock_init(&host->lock);
3025	spin_lock_init(&host->irq_lock);
3026	INIT_LIST_HEAD(&host->queue);
3027
3028	/*
3029	 * Get the host data width - this assumes that HCON has been set with
3030	 * the correct values.
3031	 */
3032	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
3033	if (!i) {
3034		host->push_data = dw_mci_push_data16;
3035		host->pull_data = dw_mci_pull_data16;
3036		width = 16;
3037		host->data_shift = 1;
3038	} else if (i == 2) {
3039		host->push_data = dw_mci_push_data64;
3040		host->pull_data = dw_mci_pull_data64;
3041		width = 64;
3042		host->data_shift = 3;
3043	} else {
3044		/* Check for a reserved value, and warn if it is */
3045		WARN((i != 1),
3046		     "HCON reports a reserved host data width!\n"
3047		     "Defaulting to 32-bit access.\n");
3048		host->push_data = dw_mci_push_data32;
3049		host->pull_data = dw_mci_pull_data32;
3050		width = 32;
3051		host->data_shift = 2;
3052	}
3053
3054	/* Reset all blocks */
3055	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3056		ret = -ENODEV;
3057		goto err_clk_ciu;
3058	}
3059
3060	host->dma_ops = host->pdata->dma_ops;
3061	dw_mci_init_dma(host);
3062
3063	/* Clear the interrupts for the host controller */
3064	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3065	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3066
3067	/* Put in max timeout */
3068	mci_writel(host, TMOUT, 0xFFFFFFFF);
3069
3070	/*
3071	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
3072	 * TX_WMark = fifo_size / 2, DMA multiple-transaction size = 8
3073	 */
3074	if (!host->pdata->fifo_depth) {
3075		/*
3076		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3077		 * have been overwritten by the bootloader, just like we're
3078		 * about to do, so if you know the value for your hardware, you
3079		 * should put it in the platform data.
3080		 */
3081		fifo_size = mci_readl(host, FIFOTH);
3082		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3083	} else {
3084		fifo_size = host->pdata->fifo_depth;
3085	}
3086	host->fifo_depth = fifo_size;
3087	host->fifoth_val =
3088		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3089	mci_writel(host, FIFOTH, host->fifoth_val);
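	/*
	 * Example with a common power-on depth of 32: RX_WMark = 15,
	 * TX_WMark = 16, and the 0x2 MSIZE field requests a DMA
	 * multiple-transaction size of 8 transfers.
	 */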
3090
3091	/* disable clock to CIU */
3092	mci_writel(host, CLKENA, 0);
3093	mci_writel(host, CLKSRC, 0);
3094
3095	/*
3096	 * The data offset changed in the 2.40a spec, so check the
3097	 * version-id and set the data offset for the DATA register.
3098	 */
3099	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3100	dev_info(host->dev, "Version ID is %04x\n", host->verid);
3101
3102	if (host->verid < DW_MMC_240A)
3103		host->fifo_reg = host->regs + DATA_OFFSET;
3104	else
3105		host->fifo_reg = host->regs + DATA_240A_OFFSET;
3106
3107	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3108	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3109			       host->irq_flags, "dw-mci", host);
3110	if (ret)
3111		goto err_dmaunmap;
3112
3113	if (host->pdata->num_slots)
3114		host->num_slots = host->pdata->num_slots;
3115	else
3116		host->num_slots = 1;
3117
3118	if (host->num_slots < 1 ||
3119	    host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
3120		dev_err(host->dev,
3121			"Platform data must supply correct num_slots.\n");
3122		ret = -ENODEV;
3123		goto err_clk_ciu;
3124	}
3125
3126	/*
3127	 * Enable interrupts for command done, data over, data empty,
3128	 * receive ready and error such as transmit, receive timeout, crc error
3129	 */
3130	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3131		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3132		   DW_MCI_ERROR_FLAGS);
3133	/* Enable mci interrupt */
3134	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3135
3136	dev_info(host->dev,
3137		 "DW MMC controller at irq %d, %d-bit host data width, %u-deep FIFO\n",
3138		 host->irq, width, fifo_size);
3139
3140	/* We need at least one slot to succeed */
3141	for (i = 0; i < host->num_slots; i++) {
3142		ret = dw_mci_init_slot(host, i);
3143		if (ret)
3144			dev_dbg(host->dev, "slot %d init failed\n", i);
3145		else
3146			init_slots++;
3147	}
3148
3149	if (init_slots) {
3150		dev_info(host->dev, "%d slots initialized\n", init_slots);
3151	} else {
3152		dev_dbg(host->dev,
3153			"attempted to initialize %d slots, but failed on all\n",
3154			host->num_slots);
3155		goto err_dmaunmap;
3156	}
3157
3158	/* Now that slots are all setup, we can enable card detect */
3159	dw_mci_enable_cd(host);
3160
3161	return 0;
3162
3163err_dmaunmap:
3164	if (host->use_dma && host->dma_ops->exit)
3165		host->dma_ops->exit(host);
3166
3167err_clk_ciu:
3168	if (!IS_ERR(host->ciu_clk))
3169		clk_disable_unprepare(host->ciu_clk);
3170
3171err_clk_biu:
3172	if (!IS_ERR(host->biu_clk))
3173		clk_disable_unprepare(host->biu_clk);
3174
3175	return ret;
3176}
3177EXPORT_SYMBOL(dw_mci_probe);
3178
3179void dw_mci_remove(struct dw_mci *host)
3180{
3181	int i;
3182
3183	for (i = 0; i < host->num_slots; i++) {
3184		dev_dbg(host->dev, "remove slot %d\n", i);
3185		if (host->slot[i])
3186			dw_mci_cleanup_slot(host->slot[i], i);
3187	}
3188
3189	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3190	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3191
3192	/* disable clock to CIU */
3193	mci_writel(host, CLKENA, 0);
3194	mci_writel(host, CLKSRC, 0);
3195
3196	if (host->use_dma && host->dma_ops->exit)
3197		host->dma_ops->exit(host);
3198
3199	if (!IS_ERR(host->ciu_clk))
3200		clk_disable_unprepare(host->ciu_clk);
3201
3202	if (!IS_ERR(host->biu_clk))
3203		clk_disable_unprepare(host->biu_clk);
3204}
3205EXPORT_SYMBOL(dw_mci_remove);
3206
3207
3208
3209#ifdef CONFIG_PM_SLEEP
3210/*
3211 * TODO: we should probably disable the clock to the card in the suspend path.
3212 */
3213int dw_mci_suspend(struct dw_mci *host)
3214{
3215	if (host->use_dma && host->dma_ops->exit)
3216		host->dma_ops->exit(host);
3217
3218	return 0;
3219}
3220EXPORT_SYMBOL(dw_mci_suspend);
3221
3222int dw_mci_resume(struct dw_mci *host)
3223{
3224	int i, ret;
3225
3226	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3227		ret = -ENODEV;
3228		return ret;
3229	}
3230
3231	if (host->use_dma && host->dma_ops->init)
3232		host->dma_ops->init(host);
3233
3234	/*
3235	 * Restore the initial value of the FIFOTH register
3236	 * and invalidate prev_blksz by zeroing it.
3237	 */
3238	mci_writel(host, FIFOTH, host->fifoth_val);
3239	host->prev_blksz = 0;
3240
3241	/* Put in max timeout */
3242	mci_writel(host, TMOUT, 0xFFFFFFFF);
3243
3244	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3245	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3246		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3247		   DW_MCI_ERROR_FLAGS);
3248	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3249
3250	for (i = 0; i < host->num_slots; i++) {
3251		struct dw_mci_slot *slot = host->slot[i];
3252
3253		if (!slot)
3254			continue;
3255		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3256			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3257			dw_mci_setup_bus(slot, true);
3258		}
3259	}
3260
3261	/* Now that slots are all setup, we can enable card detect */
3262	dw_mci_enable_cd(host);
3263
3264	return 0;
3265}
3266EXPORT_SYMBOL(dw_mci_resume);
3267#endif /* CONFIG_PM_SLEEP */
3268
3269static int __init dw_mci_init(void)
3270{
3271	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3272	return 0;
3273}
3274
3275static void __exit dw_mci_exit(void)
3276{
3277}
3278
3279module_init(dw_mci_init);
3280module_exit(dw_mci_exit);
3281
3282MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3283MODULE_AUTHOR("NXP Semiconductor VietNam");
3284MODULE_AUTHOR("Imagination Technologies Ltd");
3285MODULE_LICENSE("GPL v2");
v3.15
   1/*
   2 * Synopsys DesignWare Multimedia Card Interface driver
   3 *  (Based on NXP driver for lpc 31xx)
   4 *
   5 * Copyright (C) 2009 NXP Semiconductors
   6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 */
  13
  14#include <linux/blkdev.h>
  15#include <linux/clk.h>
  16#include <linux/debugfs.h>
  17#include <linux/device.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/err.h>
  20#include <linux/init.h>
  21#include <linux/interrupt.h>
  22#include <linux/ioport.h>
  23#include <linux/module.h>
  24#include <linux/platform_device.h>
  25#include <linux/seq_file.h>
  26#include <linux/slab.h>
  27#include <linux/stat.h>
  28#include <linux/delay.h>
  29#include <linux/irq.h>
 
  30#include <linux/mmc/host.h>
  31#include <linux/mmc/mmc.h>
 
  32#include <linux/mmc/sdio.h>
  33#include <linux/mmc/dw_mmc.h>
  34#include <linux/bitops.h>
  35#include <linux/regulator/consumer.h>
  36#include <linux/workqueue.h>
  37#include <linux/of.h>
  38#include <linux/of_gpio.h>
  39#include <linux/mmc/slot-gpio.h>
  40
  41#include "dw_mmc.h"
  42
  43/* Common flag combinations */
  44#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
  45				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
  46				 SDMMC_INT_EBE)
  47#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
  48				 SDMMC_INT_RESP_ERR)
  49#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
  50				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
  51#define DW_MCI_SEND_STATUS	1
  52#define DW_MCI_RECV_STATUS	2
  53#define DW_MCI_DMA_THRESHOLD	16
  54
  55#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
  56#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
  57
  58#ifdef CONFIG_MMC_DW_IDMAC
  59#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
  60				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
  61				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
  62				 SDMMC_IDMAC_INT_TI)
  63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  64struct idmac_desc {
  65	u32		des0;	/* Control Descriptor */
  66#define IDMAC_DES0_DIC	BIT(1)
  67#define IDMAC_DES0_LD	BIT(2)
  68#define IDMAC_DES0_FD	BIT(3)
  69#define IDMAC_DES0_CH	BIT(4)
  70#define IDMAC_DES0_ER	BIT(5)
  71#define IDMAC_DES0_CES	BIT(30)
  72#define IDMAC_DES0_OWN	BIT(31)
  73
  74	u32		des1;	/* Buffer sizes */
  75#define IDMAC_SET_BUFFER1_SIZE(d, s) \
  76	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
  77
  78	u32		des2;	/* buffer 1 physical address */
  79
  80	u32		des3;	/* buffer 2 physical address */
  81};
  82#endif /* CONFIG_MMC_DW_IDMAC */
  83
  84static const u8 tuning_blk_pattern_4bit[] = {
  85	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
  86	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
  87	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
  88	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
  89	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
  90	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
  91	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
  92	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
  93};
  94
  95static const u8 tuning_blk_pattern_8bit[] = {
  96	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
  97	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
  98	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
  99	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
 100	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
 101	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
 102	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
 103	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
 104	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
 105	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
 106	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
 107	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
 108	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
 109	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
 110	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
 111	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
 112};
 113
 114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
 115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
 
 116
 117#if defined(CONFIG_DEBUG_FS)
 118static int dw_mci_req_show(struct seq_file *s, void *v)
 119{
 120	struct dw_mci_slot *slot = s->private;
 121	struct mmc_request *mrq;
 122	struct mmc_command *cmd;
 123	struct mmc_command *stop;
 124	struct mmc_data	*data;
 125
 126	/* Make sure we get a consistent snapshot */
 127	spin_lock_bh(&slot->host->lock);
 128	mrq = slot->mrq;
 129
 130	if (mrq) {
 131		cmd = mrq->cmd;
 132		data = mrq->data;
 133		stop = mrq->stop;
 134
 135		if (cmd)
 136			seq_printf(s,
 137				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 138				   cmd->opcode, cmd->arg, cmd->flags,
 139				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
 140				   cmd->resp[2], cmd->error);
 141		if (data)
 142			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
 143				   data->bytes_xfered, data->blocks,
 144				   data->blksz, data->flags, data->error);
 145		if (stop)
 146			seq_printf(s,
 147				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 148				   stop->opcode, stop->arg, stop->flags,
 149				   stop->resp[0], stop->resp[1], stop->resp[2],
 150				   stop->resp[2], stop->error);
 151	}
 152
 153	spin_unlock_bh(&slot->host->lock);
 154
 155	return 0;
 156}
 157
 158static int dw_mci_req_open(struct inode *inode, struct file *file)
 159{
 160	return single_open(file, dw_mci_req_show, inode->i_private);
 161}
 162
 163static const struct file_operations dw_mci_req_fops = {
 164	.owner		= THIS_MODULE,
 165	.open		= dw_mci_req_open,
 166	.read		= seq_read,
 167	.llseek		= seq_lseek,
 168	.release	= single_release,
 169};
 170
 171static int dw_mci_regs_show(struct seq_file *s, void *v)
 172{
 173	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
 174	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
 175	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
 176	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
 177	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
 178	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
 179
 180	return 0;
 181}
 182
 183static int dw_mci_regs_open(struct inode *inode, struct file *file)
 184{
 185	return single_open(file, dw_mci_regs_show, inode->i_private);
 186}
 187
 188static const struct file_operations dw_mci_regs_fops = {
 189	.owner		= THIS_MODULE,
 190	.open		= dw_mci_regs_open,
 191	.read		= seq_read,
 192	.llseek		= seq_lseek,
 193	.release	= single_release,
 194};
 195
 196static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
 197{
 198	struct mmc_host	*mmc = slot->mmc;
 199	struct dw_mci *host = slot->host;
 200	struct dentry *root;
 201	struct dentry *node;
 202
 203	root = mmc->debugfs_root;
 204	if (!root)
 205		return;
 206
 207	node = debugfs_create_file("regs", S_IRUSR, root, host,
 208				   &dw_mci_regs_fops);
 209	if (!node)
 210		goto err;
 211
 212	node = debugfs_create_file("req", S_IRUSR, root, slot,
 213				   &dw_mci_req_fops);
 214	if (!node)
 215		goto err;
 216
 217	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
 218	if (!node)
 219		goto err;
 220
 221	node = debugfs_create_x32("pending_events", S_IRUSR, root,
 222				  (u32 *)&host->pending_events);
 223	if (!node)
 224		goto err;
 225
 226	node = debugfs_create_x32("completed_events", S_IRUSR, root,
 227				  (u32 *)&host->completed_events);
 228	if (!node)
 229		goto err;
 230
 231	return;
 232
 233err:
 234	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
 235}
 236#endif /* defined(CONFIG_DEBUG_FS) */
 237
 238static void dw_mci_set_timeout(struct dw_mci *host)
 239{
 240	/* timeout (maximum) */
 241	mci_writel(host, TMOUT, 0xffffffff);
 242}
 243
 244static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
 245{
 246	struct mmc_data	*data;
 247	struct dw_mci_slot *slot = mmc_priv(mmc);
 248	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
 249	u32 cmdr;
 
 250	cmd->error = -EINPROGRESS;
 251
 252	cmdr = cmd->opcode;
 253
 254	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
 255	    cmd->opcode == MMC_GO_IDLE_STATE ||
 256	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
 257	    (cmd->opcode == SD_IO_RW_DIRECT &&
 258	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
 259		cmdr |= SDMMC_CMD_STOP;
 260	else
 261		if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
 262			cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 263
 264	if (cmd->flags & MMC_RSP_PRESENT) {
 265		/* We expect a response, so set this bit */
 266		cmdr |= SDMMC_CMD_RESP_EXP;
 267		if (cmd->flags & MMC_RSP_136)
 268			cmdr |= SDMMC_CMD_RESP_LONG;
 269	}
 270
 271	if (cmd->flags & MMC_RSP_CRC)
 272		cmdr |= SDMMC_CMD_RESP_CRC;
 273
 274	data = cmd->data;
 275	if (data) {
 276		cmdr |= SDMMC_CMD_DAT_EXP;
 277		if (data->flags & MMC_DATA_STREAM)
 278			cmdr |= SDMMC_CMD_STRM_MODE;
 279		if (data->flags & MMC_DATA_WRITE)
 280			cmdr |= SDMMC_CMD_DAT_WR;
 281	}
 282
 283	if (drv_data && drv_data->prepare_command)
 284		drv_data->prepare_command(slot->host, &cmdr);
 285
 286	return cmdr;
 287}
 288
 289static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
 290{
 291	struct mmc_command *stop;
 292	u32 cmdr;
 293
 294	if (!cmd->data)
 295		return 0;
 296
 297	stop = &host->stop_abort;
 298	cmdr = cmd->opcode;
 299	memset(stop, 0, sizeof(struct mmc_command));
 300
 301	if (cmdr == MMC_READ_SINGLE_BLOCK ||
 302	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
 303	    cmdr == MMC_WRITE_BLOCK ||
 304	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
 
 
 305		stop->opcode = MMC_STOP_TRANSMISSION;
 306		stop->arg = 0;
 307		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
 308	} else if (cmdr == SD_IO_RW_EXTENDED) {
 309		stop->opcode = SD_IO_RW_DIRECT;
 310		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
 311			     ((cmd->arg >> 28) & 0x7);
 312		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
 313	} else {
 314		return 0;
 315	}
 316
 317	cmdr = stop->opcode | SDMMC_CMD_STOP |
 318		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
 319
 320	return cmdr;
 321}
 322
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 323static void dw_mci_start_command(struct dw_mci *host,
 324				 struct mmc_command *cmd, u32 cmd_flags)
 325{
 326	host->cmd = cmd;
 327	dev_vdbg(host->dev,
 328		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
 329		 cmd->arg, cmd_flags);
 330
 331	mci_writel(host, CMDARG, cmd->arg);
 332	wmb();
 
 333
 334	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
 335}
 336
 337static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
 338{
 339	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
 
 340	dw_mci_start_command(host, stop, host->stop_cmdr);
 341}
 342
 343/* DMA interface functions */
 344static void dw_mci_stop_dma(struct dw_mci *host)
 345{
 346	if (host->using_dma) {
 347		host->dma_ops->stop(host);
 348		host->dma_ops->cleanup(host);
 349	}
 350
 351	/* Data transfer was stopped by the interrupt handler */
 352	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 353}
 354
 355static int dw_mci_get_dma_dir(struct mmc_data *data)
 356{
 357	if (data->flags & MMC_DATA_WRITE)
 358		return DMA_TO_DEVICE;
 359	else
 360		return DMA_FROM_DEVICE;
 361}
 362
 363#ifdef CONFIG_MMC_DW_IDMAC
 364static void dw_mci_dma_cleanup(struct dw_mci *host)
 365{
 366	struct mmc_data *data = host->data;
 367
 368	if (data)
 369		if (!data->host_cookie)
 370			dma_unmap_sg(host->dev,
 371				     data->sg,
 372				     data->sg_len,
 373				     dw_mci_get_dma_dir(data));
 374}
 375
 376static void dw_mci_idmac_reset(struct dw_mci *host)
 377{
 378	u32 bmod = mci_readl(host, BMOD);
 379	/* Software reset of DMA */
 380	bmod |= SDMMC_IDMAC_SWRESET;
 381	mci_writel(host, BMOD, bmod);
 382}
 383
 384static void dw_mci_idmac_stop_dma(struct dw_mci *host)
 385{
 386	u32 temp;
 387
 388	/* Disable and reset the IDMAC interface */
 389	temp = mci_readl(host, CTRL);
 390	temp &= ~SDMMC_CTRL_USE_IDMAC;
 391	temp |= SDMMC_CTRL_DMA_RESET;
 392	mci_writel(host, CTRL, temp);
 393
 394	/* Stop the IDMAC running */
 395	temp = mci_readl(host, BMOD);
 396	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
 397	temp |= SDMMC_IDMAC_SWRESET;
 398	mci_writel(host, BMOD, temp);
 399}
 400
 401static void dw_mci_idmac_complete_dma(struct dw_mci *host)
 402{
 
 403	struct mmc_data *data = host->data;
 404
 405	dev_vdbg(host->dev, "DMA complete\n");
 406
 
 
 
 
 
 
 
 
 407	host->dma_ops->cleanup(host);
 408
 409	/*
 410	 * If the card was removed, data will be NULL. No point in trying to
 411	 * send the stop command or waiting for NBUSY in this case.
 412	 */
 413	if (data) {
 414		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 415		tasklet_schedule(&host->tasklet);
 416	}
 417}
 418
 419static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
 420				    unsigned int sg_len)
 421{
 
 422	int i;
 423	struct idmac_desc *desc = host->sg_cpu;
 424
 425	for (i = 0; i < sg_len; i++, desc++) {
 426		unsigned int length = sg_dma_len(&data->sg[i]);
 427		u32 mem_addr = sg_dma_address(&data->sg[i]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 428
 429		/* Set the OWN bit and disable interrupts for this descriptor */
 430		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
 431
 432		/* Buffer length */
 433		IDMAC_SET_BUFFER1_SIZE(desc, length);
 434
 435		/* Physical address to DMA to/from */
 436		desc->des2 = mem_addr;
 437	}
 
 
 
 
 438
 439	/* Set first descriptor */
 440	desc = host->sg_cpu;
 441	desc->des0 |= IDMAC_DES0_FD;
 442
 443	/* Set last descriptor */
 444	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
 445	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
 446	desc->des0 |= IDMAC_DES0_LD;
 
 447
 448	wmb();
 449}
 450
 451static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 452{
 453	u32 temp;
 454
 455	dw_mci_translate_sglist(host, host->data, sg_len);
 456
 
 
 
 
 457	/* Select IDMAC interface */
 458	temp = mci_readl(host, CTRL);
 459	temp |= SDMMC_CTRL_USE_IDMAC;
 460	mci_writel(host, CTRL, temp);
 461
 
 462	wmb();
 463
 464	/* Enable the IDMAC */
 465	temp = mci_readl(host, BMOD);
 466	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
 467	mci_writel(host, BMOD, temp);
 468
 469	/* Start it running */
 470	mci_writel(host, PLDMND, 1);
 
 
 471}
 472
 473static int dw_mci_idmac_init(struct dw_mci *host)
 474{
 475	struct idmac_desc *p;
 476	int i;
 477
 478	/* Number of descriptors in the ring buffer */
 479	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 480
 481	/* Forward link the descriptor list */
 482	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
 483		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
 484
 485	/* Set the last descriptor as the end-of-ring descriptor */
 486	p->des3 = host->sg_dma;
 487	p->des0 = IDMAC_DES0_ER;
 
 
 
 
 
 
 
 
 
 
 
 488
 489	dw_mci_idmac_reset(host);
 490
 491	/* Mask out interrupts - get Tx & Rx complete only */
 492	mci_writel(host, IDSTS, IDMAC_INT_CLR);
 493	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
 494		   SDMMC_IDMAC_INT_TI);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 495
 496	/* Set the descriptor base address */
 497	mci_writel(host, DBADDR, host->sg_dma);
 498	return 0;
 499}
 500
 501static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
 502	.init = dw_mci_idmac_init,
 503	.start = dw_mci_idmac_start_dma,
 504	.stop = dw_mci_idmac_stop_dma,
 505	.complete = dw_mci_idmac_complete_dma,
 506	.cleanup = dw_mci_dma_cleanup,
 507};
 508#endif /* CONFIG_MMC_DW_IDMAC */
 509
 510static int dw_mci_pre_dma_transfer(struct dw_mci *host,
 511				   struct mmc_data *data,
 512				   bool next)
 513{
 514	struct scatterlist *sg;
 515	unsigned int i, sg_len;
 516
 517	if (!next && data->host_cookie)
 518		return data->host_cookie;
 519
 520	/*
 521	 * We don't do DMA on "complex" transfers, i.e. with
 522	 * non-word-aligned buffers or lengths. Also, we don't bother
 523	 * with all the DMA setup overhead for short transfers.
 524	 */
 525	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
 526		return -EINVAL;
 527
 528	if (data->blksz & 3)
 529		return -EINVAL;
 530
 531	for_each_sg(data->sg, sg, data->sg_len, i) {
 532		if (sg->offset & 3 || sg->length & 3)
 533			return -EINVAL;
 534	}
 535
 536	sg_len = dma_map_sg(host->dev,
 537			    data->sg,
 538			    data->sg_len,
 539			    dw_mci_get_dma_dir(data));
 540	if (sg_len == 0)
 541		return -EINVAL;
 542
 543	if (next)
 544		data->host_cookie = sg_len;
 545
 546	return sg_len;
 547}
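/*
 * Illustrative sketch (not part of the driver): the DMA eligibility
 * rules enforced above, restated as a standalone predicate for a
 * single segment. example_dma_ok() is a hypothetical helper: a
 * 512-byte word-aligned transfer passes, while a 12-byte transfer
 * fails the DW_MCI_DMA_THRESHOLD check and falls back to PIO.
 */
static int example_dma_ok(unsigned int blocks, unsigned int blksz,
			  unsigned int sg_offset, unsigned int sg_length)
{
	if (blocks * blksz < DW_MCI_DMA_THRESHOLD)
		return 0;	/* too short; DMA setup not worth it */
	if (blksz & 3)
		return 0;	/* block size must be word-aligned */
	if (sg_offset & 3 || sg_length & 3)
		return 0;	/* every segment must be word-aligned */
	return 1;
}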
 548
 549static void dw_mci_pre_req(struct mmc_host *mmc,
 550			   struct mmc_request *mrq,
 551			   bool is_first_req)
 552{
 553	struct dw_mci_slot *slot = mmc_priv(mmc);
 554	struct mmc_data *data = mrq->data;
 555
 556	if (!slot->host->use_dma || !data)
 557		return;
 558
 559	if (data->host_cookie) {
 560		data->host_cookie = 0;
 561		return;
 562	}
 563
 564	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
 565		data->host_cookie = 0;
 566}
 567
 568static void dw_mci_post_req(struct mmc_host *mmc,
 569			    struct mmc_request *mrq,
 570			    int err)
 571{
 572	struct dw_mci_slot *slot = mmc_priv(mmc);
 573	struct mmc_data *data = mrq->data;
 574
 575	if (!slot->host->use_dma || !data)
 576		return;
 577
 578	if (data->host_cookie)
 579		dma_unmap_sg(slot->host->dev,
 580			     data->sg,
 581			     data->sg_len,
 582			     dw_mci_get_dma_dir(data));
 583	data->host_cookie = 0;
 584}
 585
 586static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
 587{
 588#ifdef CONFIG_MMC_DW_IDMAC
 589	unsigned int blksz = data->blksz;
 590	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
 591	u32 fifo_width = 1 << host->data_shift;
 592	u32 blksz_depth = blksz / fifo_width, fifoth_val;
 593	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
 594	int idx = ARRAY_SIZE(mszs) - 1;
 595
 596	tx_wmark = (host->fifo_depth) / 2;
 597	tx_wmark_invers = host->fifo_depth - tx_wmark;
 598
 599	/*
 600	 * MSIZE is '1' (a single-transfer burst) if blksz is
 601	 * not a multiple of the FIFO width.
 602	 */
 603	if (blksz % fifo_width) {
 604		msize = 0;
 605		rx_wmark = 1;
 606		goto done;
 607	}
 608
 609	do {
 610		if (!((blksz_depth % mszs[idx]) ||
 611		     (tx_wmark_invers % mszs[idx]))) {
 612			msize = idx;
 613			rx_wmark = mszs[idx] - 1;
 614			break;
 615		}
 616	} while (--idx > 0);
 617	/*
 618	 * If idx reaches '0' the loop exits without a match;
 619	 * the initial values (msize = 0, rx_wmark = 1) are then used.
 620	 */
 621done:
 622	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
 623	mci_writel(host, FIFOTH, fifoth_val);
 624#endif
 625}
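/*
 * Worked example (illustrative only): with a 32-entry FIFO
 * (fifo_depth = 32), a 32-bit data port (data_shift = 2, fifo_width
 * = 4) and blksz = 512, blksz_depth = 128 and tx_wmark_invers = 16.
 * Scanning mszs[] downwards, 16 is the largest burst size dividing
 * both, so msize = 4 (a 16-transfer burst), rx_wmark = 15 and
 * tx_wmark = 16 are programmed into FIFOTH.
 */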
 626
 627static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
 628{
 629	unsigned int blksz = data->blksz;
 630	u32 blksz_depth, fifo_depth;
 631	u16 thld_size;
 632
 633	WARN_ON(!(data->flags & MMC_DATA_READ));
 634
 635	if (host->timing != MMC_TIMING_MMC_HS200 &&
 636	    host->timing != MMC_TIMING_UHS_SDR104)
 637		goto disable;
 638
 639	blksz_depth = blksz / (1 << host->data_shift);
 640	fifo_depth = host->fifo_depth;
 641
 642	if (blksz_depth > fifo_depth)
 643		goto disable;
 644
 645	/*
 646	 * If (blksz_depth) >= (fifo_depth >> 1), any 'thld_size <= blksz'
 647	 * will do; if (blksz_depth) < (fifo_depth >> 1), it must be
 648	 * 'thld_size = blksz'. Currently just choose blksz for both.
 649	 */
 650	thld_size = blksz;
 651	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
 652	return;
 653
 654disable:
 655	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
 656}
 657
 658static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
 659{
 660	int sg_len;
 661	u32 temp;
 662
 663	host->using_dma = 0;
 664
 665	/* If we don't have a channel, we can't do DMA */
 666	if (!host->use_dma)
 667		return -ENODEV;
 668
 669	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
 670	if (sg_len < 0) {
 671		host->dma_ops->stop(host);
 672		return sg_len;
 673	}
 674
 675	host->using_dma = 1;
 676
 677	dev_vdbg(host->dev,
 678		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
 679		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
 680		 sg_len);
 681
 682	/*
 683	 * Decide the MSIZE and RX/TX Watermark.
 684	 * If the current block size is the same as the previous one,
 685	 * there is no need to update FIFOTH.
 686	 */
 687	if (host->prev_blksz != data->blksz)
 688		dw_mci_adjust_fifoth(host, data);
 689
 690	/* Enable the DMA interface */
 691	temp = mci_readl(host, CTRL);
 692	temp |= SDMMC_CTRL_DMA_ENABLE;
 693	mci_writel(host, CTRL, temp);
 694
 695	/* Disable RX/TX IRQs, let DMA handle it */
 696	temp = mci_readl(host, INTMASK);
 697	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
 698	mci_writel(host, INTMASK, temp);
 699
 700	host->dma_ops->start(host, sg_len);
 701
 702	return 0;
 703}
 704
 705static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
 706{
 707	u32 temp;
 708
 709	data->error = -EINPROGRESS;
 710
 711	WARN_ON(host->data);
 712	host->sg = NULL;
 713	host->data = data;
 714
 715	if (data->flags & MMC_DATA_READ) {
 716		host->dir_status = DW_MCI_RECV_STATUS;
 717		dw_mci_ctrl_rd_thld(host, data);
 718	} else {
 719		host->dir_status = DW_MCI_SEND_STATUS;
 720	}
 721
 722	if (dw_mci_submit_data_dma(host, data)) {
 723		int flags = SG_MITER_ATOMIC;
 724		if (host->data->flags & MMC_DATA_READ)
 725			flags |= SG_MITER_TO_SG;
 726		else
 727			flags |= SG_MITER_FROM_SG;
 728
 729		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 730		host->sg = data->sg;
 731		host->part_buf_start = 0;
 732		host->part_buf_count = 0;
 733
 734		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
 735		temp = mci_readl(host, INTMASK);
 736		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
 737		mci_writel(host, INTMASK, temp);
 738
 739		temp = mci_readl(host, CTRL);
 740		temp &= ~SDMMC_CTRL_DMA_ENABLE;
 741		mci_writel(host, CTRL, temp);
 742
 743		/*
 744		 * Use the initial fifoth_val for PIO mode.
 745		 * If the next transfer may be handled by DMA mode,
 746		 * prev_blksz should be invalidated.
 747		 */
 748		mci_writel(host, FIFOTH, host->fifoth_val);
 749		host->prev_blksz = 0;
 750	} else {
 751		/*
 752		 * Keep the current block size.
 753		 * It will be used to decide whether to update
 754		 * the FIFOTH register next time.
 755		 */
 756		host->prev_blksz = data->blksz;
 757	}
 758}
 759
 760static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
 761{
 762	struct dw_mci *host = slot->host;
 763	unsigned long timeout = jiffies + msecs_to_jiffies(500);
 764	unsigned int cmd_status = 0;
 765
 766	mci_writel(host, CMDARG, arg);
 767	wmb();
 768	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
 769
 770	while (time_before(jiffies, timeout)) {
 771		cmd_status = mci_readl(host, CMD);
 772		if (!(cmd_status & SDMMC_CMD_START))
 773			return;
 774	}
 775	dev_err(&slot->mmc->class_dev,
 776		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
 777		cmd, arg, cmd_status);
 778}
 779
 780static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 781{
 782	struct dw_mci *host = slot->host;
 783	unsigned int clock = slot->clock;
 784	u32 div;
 785	u32 clk_en_a;
 786
 787	if (!clock) {
 788		mci_writel(host, CLKENA, 0);
 789		mci_send_cmd(slot,
 790			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 791	} else if (clock != host->current_speed || force_clkinit) {
 792		div = host->bus_hz / clock;
 793		if (host->bus_hz % clock && host->bus_hz > clock)
 794			/*
 795			 * round the divider up (+ 1 after the divide)
 796			 * to prevent over-clocking the card.
 797			 */
 798			div += 1;
 799
 800		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
 801
 802		if ((clock << div) != slot->__clk_old || force_clkinit)
 803			dev_info(&slot->mmc->class_dev,
 804				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz, div = %d)\n",
 805				 slot->id, host->bus_hz, clock,
 806				 div ? ((host->bus_hz / div) >> 1) :
 807				 host->bus_hz, div);
 808
 809		/* disable clock */
 810		mci_writel(host, CLKENA, 0);
 811		mci_writel(host, CLKSRC, 0);
 812
 813		/* inform CIU */
 814		mci_send_cmd(slot,
 815			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 816
 817		/* set clock to desired speed */
 818		mci_writel(host, CLKDIV, div);
 819
 820		/* inform CIU */
 821		mci_send_cmd(slot,
 822			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 823
 824		/* enable clock; only low power if no SDIO */
 825		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
 826		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
 827			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
 828		mci_writel(host, CLKENA, clk_en_a);
 829
 830		/* inform CIU */
 831		mci_send_cmd(slot,
 832			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 833
 834		/* keep the clock value, reflecting the clock divider */
 835		slot->__clk_old = clock << div;
 836	}
 837
 838	host->current_speed = clock;
 839
 840	/* Set the current slot bus width */
 841	mci_writel(host, CTYPE, (slot->ctype << slot->id));
 842}
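/*
 * Worked example (illustrative only) of the divider math above: with
 * bus_hz = 50 MHz and a requested clock of 400 kHz, the raw quotient
 * is 125 and DIV_ROUND_UP(125, 2) gives CLKDIV = 63, so the card sees
 * 50 MHz / (2 * 63) ~= 396.8 kHz. Rounding the divider up keeps the
 * actual rate at or below the requested one.
 */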
 843
 844static void __dw_mci_start_request(struct dw_mci *host,
 845				   struct dw_mci_slot *slot,
 846				   struct mmc_command *cmd)
 847{
 848	struct mmc_request *mrq;
 849	struct mmc_data	*data;
 850	u32 cmdflags;
 851
 852	mrq = slot->mrq;
 853	if (host->pdata->select_slot)
 854		host->pdata->select_slot(slot->id);
 855
 856	host->cur_slot = slot;
 857	host->mrq = mrq;
 858
 859	host->pending_events = 0;
 860	host->completed_events = 0;
 861	host->cmd_status = 0;
 862	host->data_status = 0;
 863	host->dir_status = 0;
 864
 865	data = cmd->data;
 866	if (data) {
 867		dw_mci_set_timeout(host);
 868		mci_writel(host, BYTCNT, data->blksz*data->blocks);
 869		mci_writel(host, BLKSIZ, data->blksz);
 870	}
 871
 872	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
 873
 874	/* this is the first command, send the initialization clock */
 875	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
 876		cmdflags |= SDMMC_CMD_INIT;
 877
 878	if (data) {
 879		dw_mci_submit_data(host, data);
 880		wmb();
 881	}
 882
 883	dw_mci_start_command(host, cmd, cmdflags);
 884
 885	if (mrq->stop)
 886		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
 887	else
 888		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
 889}
 890
 891static void dw_mci_start_request(struct dw_mci *host,
 892				 struct dw_mci_slot *slot)
 893{
 894	struct mmc_request *mrq = slot->mrq;
 895	struct mmc_command *cmd;
 896
 897	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
 898	__dw_mci_start_request(host, slot, cmd);
 899}
 900
 901/* must be called with host->lock held */
 902static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
 903				 struct mmc_request *mrq)
 904{
 905	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
 906		 host->state);
 907
 908	slot->mrq = mrq;
 909
 910	if (host->state == STATE_IDLE) {
 911		host->state = STATE_SENDING_CMD;
 912		dw_mci_start_request(host, slot);
 913	} else {
 914		list_add_tail(&slot->queue_node, &host->queue);
 915	}
 916}
 917
 918static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 919{
 920	struct dw_mci_slot *slot = mmc_priv(mmc);
 921	struct dw_mci *host = slot->host;
 922
 923	WARN_ON(slot->mrq);
 924
 925	/*
 926	 * The check for card presence and queueing of the request must be
 927	 * atomic, otherwise the card could be removed in between and the
 928	 * request wouldn't fail until another card was inserted.
 929	 */
 930	spin_lock_bh(&host->lock);
 931
 932	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
 933		spin_unlock_bh(&host->lock);
 934		mrq->cmd->error = -ENOMEDIUM;
 935		mmc_request_done(mmc, mrq);
 936		return;
 937	}
 938
 939	dw_mci_queue_request(host, slot, mrq);
 940
 941	spin_unlock_bh(&host->lock);
 942}
 943
 944static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 945{
 946	struct dw_mci_slot *slot = mmc_priv(mmc);
 947	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
 948	u32 regs;
 949
 950	switch (ios->bus_width) {
 951	case MMC_BUS_WIDTH_4:
 952		slot->ctype = SDMMC_CTYPE_4BIT;
 953		break;
 954	case MMC_BUS_WIDTH_8:
 955		slot->ctype = SDMMC_CTYPE_8BIT;
 956		break;
 957	default:
 958		/* set default 1 bit mode */
 959		slot->ctype = SDMMC_CTYPE_1BIT;
 960	}
 961
 962	regs = mci_readl(slot->host, UHS_REG);
 963
 964	/* DDR mode set */
 965	if (ios->timing == MMC_TIMING_UHS_DDR50)
 966		regs |= ((0x1 << slot->id) << 16);
 967	else
 968		regs &= ~((0x1 << slot->id) << 16);
 969
 970	mci_writel(slot->host, UHS_REG, regs);
 971	slot->host->timing = ios->timing;
 972
 973	/*
 974	 * Use mirror of ios->clock to prevent race with mmc
 975	 * core ios update when finding the minimum.
 976	 */
 977	slot->clock = ios->clock;
 978
 979	if (drv_data && drv_data->set_ios)
 980		drv_data->set_ios(slot->host, ios);
 981
 982	/* Slot specific timing and width adjustment */
 983	dw_mci_setup_bus(slot, false);
 984
 985	switch (ios->power_mode) {
 986	case MMC_POWER_UP:
 987		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
 988		/* Power up slot */
 989		if (slot->host->pdata->setpower)
 990			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
 991		regs = mci_readl(slot->host, PWREN);
 992		regs |= (1 << slot->id);
 993		mci_writel(slot->host, PWREN, regs);
 994		break;
 995	case MMC_POWER_OFF:
 996		/* Power down slot */
 997		if (slot->host->pdata->setpower)
 998			slot->host->pdata->setpower(slot->id, 0);
 999		regs = mci_readl(slot->host, PWREN);
1000		regs &= ~(1 << slot->id);
1001		mci_writel(slot->host, PWREN, regs);
1002		break;
1003	default:
1004		break;
1005	}
1006}
1007
1008static int dw_mci_get_ro(struct mmc_host *mmc)
1009{
1010	int read_only;
1011	struct dw_mci_slot *slot = mmc_priv(mmc);
1012	struct dw_mci_board *brd = slot->host->pdata;
1013
1014	/* Use platform get_ro function, else try on-board write protect */
1015	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1016		read_only = 0;
1017	else if (brd->get_ro)
1018		read_only = brd->get_ro(slot->id);
1019	else if (gpio_is_valid(slot->wp_gpio))
1020		read_only = gpio_get_value(slot->wp_gpio);
1021	else
1022		read_only =
1023			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1024
1025	dev_dbg(&mmc->class_dev, "card is %s\n",
1026		read_only ? "read-only" : "read-write");
1027
1028	return read_only;
1029}
1030
1031static int dw_mci_get_cd(struct mmc_host *mmc)
1032{
1033	int present;
1034	struct dw_mci_slot *slot = mmc_priv(mmc);
1035	struct dw_mci_board *brd = slot->host->pdata;
1036	struct dw_mci *host = slot->host;
1037	int gpio_cd = mmc_gpio_get_cd(mmc);
1038
1039	/* Use platform get_cd function, else try onboard card detect */
1040	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1041		present = 1;
1042	else if (brd->get_cd)
1043		present = !brd->get_cd(slot->id);
1044	else if (!IS_ERR_VALUE(gpio_cd))
1045		present = gpio_cd;
1046	else
1047		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1048			== 0 ? 1 : 0;
1049
1050	spin_lock_bh(&host->lock);
1051	if (present) {
1052		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1053		dev_dbg(&mmc->class_dev, "card is present\n");
1054	} else {
1055		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1056		dev_dbg(&mmc->class_dev, "card is not present\n");
1057	}
1058	spin_unlock_bh(&host->lock);
1059
1060	return present;
1061}
1062
1063/*
1064 * Disable low power mode.
1065 *
1066 * Low power mode will stop the card clock when idle.  According to the
1067 * description of the CLKENA register we should disable low power mode
1068 * for SDIO cards if we need SDIO interrupts to work.
1069 *
1070 * This function is fast if low power mode is already disabled.
1071 */
1072static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1073{
1074	struct dw_mci *host = slot->host;
1075	u32 clk_en_a;
1076	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1077
1078	clk_en_a = mci_readl(host, CLKENA);
1079
1080	if (clk_en_a & clken_low_pwr) {
1081		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1082		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1083			     SDMMC_CMD_PRV_DAT_WAIT, 0);
1084	}
1085}
1086
1087static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1088{
1089	struct dw_mci_slot *slot = mmc_priv(mmc);
1090	struct dw_mci *host = slot->host;
1091	u32 int_mask;
1092
1093	/* Enable/disable Slot Specific SDIO interrupt */
1094	int_mask = mci_readl(host, INTMASK);
1095	if (enb) {
1096		/*
1097		 * Turn off low power mode if it was enabled.  This is a bit of
1098		 * a heavy operation and we disable / enable IRQs a lot, so
1099		 * we'll leave low power mode disabled and it will get
1100		 * re-enabled again in dw_mci_setup_bus().
1101		 */
1102		dw_mci_disable_low_power(slot);
1103
1104		mci_writel(host, INTMASK,
1105			   (int_mask | SDMMC_INT_SDIO(slot->id)));
1106	} else {
1107		mci_writel(host, INTMASK,
1108			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1109	}
1110}
1111
1112static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1113{
1114	struct dw_mci_slot *slot = mmc_priv(mmc);
1115	struct dw_mci *host = slot->host;
1116	const struct dw_mci_drv_data *drv_data = host->drv_data;
1117	struct dw_mci_tuning_data tuning_data;
1118	int err = -ENOSYS;
1119
1120	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1121		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1122			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1123			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1124		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1125			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1126			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1127		} else {
1128			return -EINVAL;
1129		}
1130	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
1131		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1132		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1133	} else {
1134		dev_err(host->dev,
1135			"Undefined command(%d) for tuning\n", opcode);
1136		return -EINVAL;
1137	}
1138
1139	if (drv_data && drv_data->execute_tuning)
1140		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1141	return err;
1142}
1143
1144static const struct mmc_host_ops dw_mci_ops = {
1145	.request		= dw_mci_request,
1146	.pre_req		= dw_mci_pre_req,
1147	.post_req		= dw_mci_post_req,
1148	.set_ios		= dw_mci_set_ios,
1149	.get_ro			= dw_mci_get_ro,
1150	.get_cd			= dw_mci_get_cd,
1151	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
1152	.execute_tuning		= dw_mci_execute_tuning,
1153};
1154
1155static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1156	__releases(&host->lock)
1157	__acquires(&host->lock)
1158{
1159	struct dw_mci_slot *slot;
1160	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
1161
1162	WARN_ON(host->cmd || host->data);
1163
1164	host->cur_slot->mrq = NULL;
1165	host->mrq = NULL;
1166	if (!list_empty(&host->queue)) {
1167		slot = list_entry(host->queue.next,
1168				  struct dw_mci_slot, queue_node);
1169		list_del(&slot->queue_node);
1170		dev_vdbg(host->dev, "list not empty: %s is next\n",
1171			 mmc_hostname(slot->mmc));
1172		host->state = STATE_SENDING_CMD;
1173		dw_mci_start_request(host, slot);
1174	} else {
1175		dev_vdbg(host->dev, "list empty\n");
1176		host->state = STATE_IDLE;
1177	}
1178
1179	spin_unlock(&host->lock);
1180	mmc_request_done(prev_mmc, mrq);
1181	spin_lock(&host->lock);
1182}
1183
1184static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1185{
1186	u32 status = host->cmd_status;
1187
1188	host->cmd_status = 0;
1189
1190	/* Read the response from the card (up to 16 bytes) */
1191	if (cmd->flags & MMC_RSP_PRESENT) {
1192		if (cmd->flags & MMC_RSP_136) {
1193			cmd->resp[3] = mci_readl(host, RESP0);
1194			cmd->resp[2] = mci_readl(host, RESP1);
1195			cmd->resp[1] = mci_readl(host, RESP2);
1196			cmd->resp[0] = mci_readl(host, RESP3);
1197		} else {
1198			cmd->resp[0] = mci_readl(host, RESP0);
1199			cmd->resp[1] = 0;
1200			cmd->resp[2] = 0;
1201			cmd->resp[3] = 0;
1202		}
1203	}
1204
1205	if (status & SDMMC_INT_RTO)
1206		cmd->error = -ETIMEDOUT;
1207	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1208		cmd->error = -EILSEQ;
1209	else if (status & SDMMC_INT_RESP_ERR)
1210		cmd->error = -EIO;
1211	else
1212		cmd->error = 0;
1213
1214	if (cmd->error) {
1215		/* newer IP versions need a delay between retries */
1216		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1217			mdelay(20);
1218	}
1219
1220	return cmd->error;
1221}
1222
1223static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1224{
1225	u32 status = host->data_status;
1226
1227	if (status & DW_MCI_DATA_ERROR_FLAGS) {
1228		if (status & SDMMC_INT_DRTO) {
1229			data->error = -ETIMEDOUT;
1230		} else if (status & SDMMC_INT_DCRC) {
1231			data->error = -EILSEQ;
1232		} else if (status & SDMMC_INT_EBE) {
1233			if (host->dir_status ==
1234				DW_MCI_SEND_STATUS) {
1235				/*
1236				 * No data CRC status was returned.
1237				 * The number of bytes transferred
1238				 * will be exaggerated in PIO mode.
1239				 */
1240				data->bytes_xfered = 0;
1241				data->error = -ETIMEDOUT;
1242			} else if (host->dir_status ==
1243					DW_MCI_RECV_STATUS) {
1244				data->error = -EIO;
1245			}
1246		} else {
1247			/* SDMMC_INT_SBE is included */
1248			data->error = -EIO;
1249		}
1250
1251		dev_err(host->dev, "data error, status 0x%08x\n", status);
1252
1253		/*
1254		 * After an error, there may be data lingering
1255		 * in the FIFO
1256		 */
1257		dw_mci_fifo_reset(host);
1258	} else {
1259		data->bytes_xfered = data->blocks * data->blksz;
1260		data->error = 0;
1261	}
1262
1263	return data->error;
1264}
1265
1266static void dw_mci_tasklet_func(unsigned long priv)
1267{
1268	struct dw_mci *host = (struct dw_mci *)priv;
1269	struct mmc_data	*data;
1270	struct mmc_command *cmd;
1271	struct mmc_request *mrq;
1272	enum dw_mci_state state;
1273	enum dw_mci_state prev_state;
1274	unsigned int err;
1275
1276	spin_lock(&host->lock);
1277
1278	state = host->state;
1279	data = host->data;
1280	mrq = host->mrq;
1281
1282	do {
1283		prev_state = state;
1284
1285		switch (state) {
1286		case STATE_IDLE:
1287			break;
1288
1289		case STATE_SENDING_CMD:
1290			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1291						&host->pending_events))
1292				break;
1293
1294			cmd = host->cmd;
1295			host->cmd = NULL;
1296			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1297			err = dw_mci_command_complete(host, cmd);
1298			if (cmd == mrq->sbc && !err) {
1299				prev_state = state = STATE_SENDING_CMD;
1300				__dw_mci_start_request(host, host->cur_slot,
1301						       mrq->cmd);
1302				goto unlock;
1303			}
1304
1305			if (cmd->data && err) {
1306				dw_mci_stop_dma(host);
1307				send_stop_abort(host, data);
1308				state = STATE_SENDING_STOP;
1309				break;
1310			}
1311
1312			if (!cmd->data || err) {
1313				dw_mci_request_end(host, mrq);
1314				goto unlock;
1315			}
1316
1317			prev_state = state = STATE_SENDING_DATA;
1318			/* fall through */
1319
1320		case STATE_SENDING_DATA:
1321			if (test_and_clear_bit(EVENT_DATA_ERROR,
1322					       &host->pending_events)) {
1323				dw_mci_stop_dma(host);
1324				send_stop_abort(host, data);
1325				state = STATE_DATA_ERROR;
1326				break;
1327			}
1328
1329			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1330						&host->pending_events))
1331				break;
1332
1333			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1334			prev_state = state = STATE_DATA_BUSY;
1335			/* fall through */
1336
1337		case STATE_DATA_BUSY:
1338			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1339						&host->pending_events))
1340				break;
1341
1342			host->data = NULL;
1343			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1344			err = dw_mci_data_complete(host, data);
1345
1346			if (!err) {
1347				if (!data->stop || mrq->sbc) {
1348					if (mrq->sbc && data->stop)
1349						data->stop->error = 0;
1350					dw_mci_request_end(host, mrq);
1351					goto unlock;
1352				}
1353
1354				/* stop command for open-ended transfer */
1355				if (data->stop)
1356					send_stop_abort(host, data);
1357			}
1358
1359			/*
1360			 * If err is non-zero, the stop/abort command
1361			 * has already been issued.
1362			 */
1363			prev_state = state = STATE_SENDING_STOP;
1364
1365			/* fall through */
1366
1367		case STATE_SENDING_STOP:
1368			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1369						&host->pending_events))
1370				break;
1371
1372			/* CMD error in data command */
1373			if (mrq->cmd->error && mrq->data)
1374				dw_mci_fifo_reset(host);
1375
1376			host->cmd = NULL;
1377			host->data = NULL;
1378
1379			if (mrq->stop)
1380				dw_mci_command_complete(host, mrq->stop);
1381			else
1382				host->cmd_status = 0;
1383
1384			dw_mci_request_end(host, mrq);
1385			goto unlock;
1386
1387		case STATE_DATA_ERROR:
1388			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1389						&host->pending_events))
1390				break;
1391
1392			state = STATE_DATA_BUSY;
1393			break;
1394		}
1395	} while (state != prev_state);
1396
1397	host->state = state;
1398unlock:
1399	spin_unlock(&host->lock);
1400
1401}
1402
1403/* push final bytes to part_buf, only use during push */
1404static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1405{
1406	memcpy((void *)&host->part_buf, buf, cnt);
1407	host->part_buf_count = cnt;
1408}
1409
1410/* append bytes to part_buf, only use during push */
1411static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1412{
1413	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1414	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1415	host->part_buf_count += cnt;
1416	return cnt;
1417}
1418
1419/* pull first bytes from part_buf, only use during pull */
1420static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1421{
1422	cnt = min(cnt, (int)host->part_buf_count);
1423	if (cnt) {
1424		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1425		       cnt);
1426		host->part_buf_count -= cnt;
1427		host->part_buf_start += cnt;
1428	}
1429	return cnt;
1430}
1431
1432/* pull final bytes from the part_buf, assuming it's just been filled */
1433static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1434{
1435	memcpy(buf, &host->part_buf, cnt);
1436	host->part_buf_start = cnt;
1437	host->part_buf_count = (1 << host->data_shift) - cnt;
1438}
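/*
 * Worked example (illustrative only) of the part_buf staging done by
 * the helpers above: with a 32-bit data port (data_shift = 2), pushing
 * 7 bytes writes one full word to the FIFO and leaves the trailing 3
 * bytes in part_buf with part_buf_count = 3; the next push appends one
 * more byte to complete the word before touching the FIFO again.
 */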
1439
1440static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1441{
1442	struct mmc_data *data = host->data;
1443	int init_cnt = cnt;
1444
1445	/* try and push anything in the part_buf */
1446	if (unlikely(host->part_buf_count)) {
1447		int len = dw_mci_push_part_bytes(host, buf, cnt);
1448		buf += len;
1449		cnt -= len;
1450		if (host->part_buf_count == 2) {
1451			mci_writew(host, DATA(host->data_offset),
1452					host->part_buf16);
1453			host->part_buf_count = 0;
1454		}
1455	}
1456#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1457	if (unlikely((unsigned long)buf & 0x1)) {
1458		while (cnt >= 2) {
1459			u16 aligned_buf[64];
1460			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1461			int items = len >> 1;
1462			int i;
1463			/* memcpy from input buffer into aligned buffer */
1464			memcpy(aligned_buf, buf, len);
1465			buf += len;
1466			cnt -= len;
1467			/* push data from aligned buffer into fifo */
1468			for (i = 0; i < items; ++i)
1469				mci_writew(host, DATA(host->data_offset),
1470						aligned_buf[i]);
1471		}
1472	} else
1473#endif
1474	{
1475		u16 *pdata = buf;
1476		for (; cnt >= 2; cnt -= 2)
1477			mci_writew(host, DATA(host->data_offset), *pdata++);
1478		buf = pdata;
1479	}
1480	/* put anything remaining in the part_buf */
1481	if (cnt) {
1482		dw_mci_set_part_bytes(host, buf, cnt);
1483		 /* Push data if we have reached the expected data length */
1484		if ((data->bytes_xfered + init_cnt) ==
1485		    (data->blksz * data->blocks))
1486			mci_writew(host, DATA(host->data_offset),
1487				   host->part_buf16);
1488	}
1489}
1490
1491static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1492{
1493#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1494	if (unlikely((unsigned long)buf & 0x1)) {
1495		while (cnt >= 2) {
1496			/* pull data from fifo into aligned buffer */
1497			u16 aligned_buf[64];
1498			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1499			int items = len >> 1;
1500			int i;
1501			for (i = 0; i < items; ++i)
1502				aligned_buf[i] = mci_readw(host,
1503						DATA(host->data_offset));
1504			/* memcpy from aligned buffer into output buffer */
1505			memcpy(buf, aligned_buf, len);
1506			buf += len;
1507			cnt -= len;
1508		}
1509	} else
1510#endif
1511	{
1512		u16 *pdata = buf;
1513		for (; cnt >= 2; cnt -= 2)
1514			*pdata++ = mci_readw(host, DATA(host->data_offset));
1515		buf = pdata;
1516	}
1517	if (cnt) {
1518		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1519		dw_mci_pull_final_bytes(host, buf, cnt);
1520	}
1521}
1522
1523static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1524{
1525	struct mmc_data *data = host->data;
1526	int init_cnt = cnt;
1527
1528	/* try and push anything in the part_buf */
1529	if (unlikely(host->part_buf_count)) {
1530		int len = dw_mci_push_part_bytes(host, buf, cnt);
1531		buf += len;
1532		cnt -= len;
1533		if (host->part_buf_count == 4) {
1534			mci_writel(host, DATA(host->data_offset),
1535					host->part_buf32);
1536			host->part_buf_count = 0;
1537		}
1538	}
1539#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1540	if (unlikely((unsigned long)buf & 0x3)) {
1541		while (cnt >= 4) {
1542			u32 aligned_buf[32];
1543			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1544			int items = len >> 2;
1545			int i;
1546			/* memcpy from input buffer into aligned buffer */
1547			memcpy(aligned_buf, buf, len);
1548			buf += len;
1549			cnt -= len;
1550			/* push data from aligned buffer into fifo */
1551			for (i = 0; i < items; ++i)
1552				mci_writel(host, DATA(host->data_offset),
1553						aligned_buf[i]);
1554		}
1555	} else
1556#endif
1557	{
1558		u32 *pdata = buf;
1559		for (; cnt >= 4; cnt -= 4)
1560			mci_writel(host, DATA(host->data_offset), *pdata++);
1561		buf = pdata;
1562	}
1563	/* put anything remaining in the part_buf */
1564	if (cnt) {
1565		dw_mci_set_part_bytes(host, buf, cnt);
1566		 /* Push data if we have reached the expected data length */
1567		if ((data->bytes_xfered + init_cnt) ==
1568		    (data->blksz * data->blocks))
1569			mci_writel(host, DATA(host->data_offset),
1570				   host->part_buf32);
1571	}
1572}
1573
1574static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1575{
1576#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1577	if (unlikely((unsigned long)buf & 0x3)) {
1578		while (cnt >= 4) {
1579			/* pull data from fifo into aligned buffer */
1580			u32 aligned_buf[32];
1581			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1582			int items = len >> 2;
1583			int i;
1584			for (i = 0; i < items; ++i)
1585				aligned_buf[i] = mci_readl(host,
1586						DATA(host->data_offset));
1587			/* memcpy from aligned buffer into output buffer */
1588			memcpy(buf, aligned_buf, len);
1589			buf += len;
1590			cnt -= len;
1591		}
1592	} else
1593#endif
1594	{
1595		u32 *pdata = buf;
1596		for (; cnt >= 4; cnt -= 4)
1597			*pdata++ = mci_readl(host, DATA(host->data_offset));
1598		buf = pdata;
1599	}
1600	if (cnt) {
1601		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1602		dw_mci_pull_final_bytes(host, buf, cnt);
1603	}
1604}
1605
1606static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1607{
1608	struct mmc_data *data = host->data;
1609	int init_cnt = cnt;
1610
1611	/* try and push anything in the part_buf */
1612	if (unlikely(host->part_buf_count)) {
1613		int len = dw_mci_push_part_bytes(host, buf, cnt);
1614		buf += len;
1615		cnt -= len;
1616
1617		if (host->part_buf_count == 8) {
1618			mci_writeq(host, DATA(host->data_offset),
1619					host->part_buf);
1620			host->part_buf_count = 0;
1621		}
1622	}
1623#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1624	if (unlikely((unsigned long)buf & 0x7)) {
1625		while (cnt >= 8) {
1626			u64 aligned_buf[16];
1627			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1628			int items = len >> 3;
1629			int i;
1630			/* memcpy from input buffer into aligned buffer */
1631			memcpy(aligned_buf, buf, len);
1632			buf += len;
1633			cnt -= len;
1634			/* push data from aligned buffer into fifo */
1635			for (i = 0; i < items; ++i)
1636				mci_writeq(host, DATA(host->data_offset),
1637						aligned_buf[i]);
1638		}
1639	} else
1640#endif
1641	{
1642		u64 *pdata = buf;
1643		for (; cnt >= 8; cnt -= 8)
1644			mci_writeq(host, DATA(host->data_offset), *pdata++);
1645		buf = pdata;
1646	}
1647	/* put anything remaining in the part_buf */
1648	if (cnt) {
1649		dw_mci_set_part_bytes(host, buf, cnt);
1650		/* Push data if we have reached the expected data length */
1651		if ((data->bytes_xfered + init_cnt) ==
1652		    (data->blksz * data->blocks))
1653			mci_writeq(host, DATA(host->data_offset),
1654				   host->part_buf);
1655	}
1656}
1657
1658static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1659{
1660#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1661	if (unlikely((unsigned long)buf & 0x7)) {
1662		while (cnt >= 8) {
1663			/* pull data from fifo into aligned buffer */
1664			u64 aligned_buf[16];
1665			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1666			int items = len >> 3;
1667			int i;
1668			for (i = 0; i < items; ++i)
1669				aligned_buf[i] = mci_readq(host,
1670						DATA(host->data_offset));
1671			/* memcpy from aligned buffer into output buffer */
1672			memcpy(buf, aligned_buf, len);
1673			buf += len;
1674			cnt -= len;
1675		}
1676	} else
1677#endif
1678	{
1679		u64 *pdata = buf;
1680		for (; cnt >= 8; cnt -= 8)
1681			*pdata++ = mci_readq(host, DATA(host->data_offset));
1682		buf = pdata;
1683	}
1684	if (cnt) {
1685		host->part_buf = mci_readq(host, DATA(host->data_offset));
1686		dw_mci_pull_final_bytes(host, buf, cnt);
1687	}
1688}
1689
1690static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1691{
1692	int len;
1693
1694	/* get remaining partial bytes */
1695	len = dw_mci_pull_part_bytes(host, buf, cnt);
1696	if (unlikely(len == cnt))
1697		return;
1698	buf += len;
1699	cnt -= len;
1700
1701	/* get the rest of the data */
1702	host->pull_data(host, buf, cnt);
1703}
1704
1705static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1706{
1707	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1708	void *buf;
1709	unsigned int offset;
1710	struct mmc_data	*data = host->data;
1711	int shift = host->data_shift;
1712	u32 status;
1713	unsigned int len;
1714	unsigned int remain, fcnt;
1715
1716	do {
1717		if (!sg_miter_next(sg_miter))
1718			goto done;
1719
1720		host->sg = sg_miter->piter.sg;
1721		buf = sg_miter->addr;
1722		remain = sg_miter->length;
1723		offset = 0;
1724
1725		do {
1726			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1727					<< shift) + host->part_buf_count;
1728			len = min(remain, fcnt);
1729			if (!len)
1730				break;
1731			dw_mci_pull_data(host, (void *)(buf + offset), len);
1732			data->bytes_xfered += len;
1733			offset += len;
1734			remain -= len;
1735		} while (remain);
1736
1737		sg_miter->consumed = offset;
1738		status = mci_readl(host, MINTSTS);
1739		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1740	/* if RXDR is ready, read again */
1741	} while ((status & SDMMC_INT_RXDR) ||
1742		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1743
1744	if (!remain) {
1745		if (!sg_miter_next(sg_miter))
1746			goto done;
1747		sg_miter->consumed = 0;
1748	}
1749	sg_miter_stop(sg_miter);
1750	return;
1751
1752done:
1753	sg_miter_stop(sg_miter);
1754	host->sg = NULL;
1755	smp_wmb();
1756	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1757}
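/*
 * Note on the fcnt math above (illustrative numbers): SDMMC_GET_FCNT()
 * reports FIFO occupancy in data-port words, so with data_shift = 2 a
 * count of 16 words yields 64 bytes, plus whatever is still staged in
 * part_buf. That sum bounds how much dw_mci_pull_data() may safely
 * consume per loop iteration.
 */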
1758
1759static void dw_mci_write_data_pio(struct dw_mci *host)
1760{
1761	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1762	void *buf;
1763	unsigned int offset;
1764	struct mmc_data	*data = host->data;
1765	int shift = host->data_shift;
1766	u32 status;
1767	unsigned int len;
1768	unsigned int fifo_depth = host->fifo_depth;
1769	unsigned int remain, fcnt;
1770
1771	do {
1772		if (!sg_miter_next(sg_miter))
1773			goto done;
1774
1775		host->sg = sg_miter->piter.sg;
1776		buf = sg_miter->addr;
1777		remain = sg_miter->length;
1778		offset = 0;
1779
1780		do {
1781			fcnt = ((fifo_depth -
1782				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1783					<< shift) - host->part_buf_count;
1784			len = min(remain, fcnt);
1785			if (!len)
1786				break;
1787			host->push_data(host, (void *)(buf + offset), len);
1788			data->bytes_xfered += len;
1789			offset += len;
1790			remain -= len;
1791		} while (remain);
1792
1793		sg_miter->consumed = offset;
1794		status = mci_readl(host, MINTSTS);
1795		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1796	} while (status & SDMMC_INT_TXDR); /* if TXDR, write again */
1797
1798	if (!remain) {
1799		if (!sg_miter_next(sg_miter))
1800			goto done;
1801		sg_miter->consumed = 0;
1802	}
1803	sg_miter_stop(sg_miter);
1804	return;
1805
1806done:
1807	sg_miter_stop(sg_miter);
1808	host->sg = NULL;
1809	smp_wmb();
1810	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1811}
1812
1813static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1814{
1815	if (!host->cmd_status)
1816		host->cmd_status = status;
1817
1818	smp_wmb();
1819
1820	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1821	tasklet_schedule(&host->tasklet);
1822}
1823
1824static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1825{
1826	struct dw_mci *host = dev_id;
1827	u32 pending;
1828	int i;
1829
1830	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1831
1832	/*
1833	 * DTO fix - version 2.10a and below, and only if internal DMA
1834	 * is configured.
1835	 */
1836	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1837		if (!pending &&
1838		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1839			pending |= SDMMC_INT_DATA_OVER;
1840	}
1841
1842	if (pending) {
1843		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1844			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1845			host->cmd_status = pending;
1846			smp_wmb();
1847			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1848		}
1849
1850		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1851			/* if there is an error, report DATA_ERROR */
1852			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1853			host->data_status = pending;
1854			smp_wmb();
1855			set_bit(EVENT_DATA_ERROR, &host->pending_events);
1856			tasklet_schedule(&host->tasklet);
1857		}
1858
1859		if (pending & SDMMC_INT_DATA_OVER) {
1860			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1861			if (!host->data_status)
1862				host->data_status = pending;
1863			smp_wmb();
1864			if (host->dir_status == DW_MCI_RECV_STATUS) {
1865				if (host->sg != NULL)
1866					dw_mci_read_data_pio(host, true);
1867			}
1868			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1869			tasklet_schedule(&host->tasklet);
1870		}
1871
1872		if (pending & SDMMC_INT_RXDR) {
1873			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1874			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1875				dw_mci_read_data_pio(host, false);
1876		}
1877
1878		if (pending & SDMMC_INT_TXDR) {
1879			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1880			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1881				dw_mci_write_data_pio(host);
1882		}
1883
1884		if (pending & SDMMC_INT_CMD_DONE) {
1885			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1886			dw_mci_cmd_interrupt(host, pending);
1887		}
1888
1889		if (pending & SDMMC_INT_CD) {
1890			mci_writel(host, RINTSTS, SDMMC_INT_CD);
1891			queue_work(host->card_workqueue, &host->card_work);
1892		}
1893
1894		/* Handle SDIO Interrupts */
1895		for (i = 0; i < host->num_slots; i++) {
1896			struct dw_mci_slot *slot = host->slot[i];
1897			if (pending & SDMMC_INT_SDIO(i)) {
1898				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1899				mmc_signal_sdio_irq(slot->mmc);
1900			}
1901		}
1902
1903	}
1904
1905#ifdef CONFIG_MMC_DW_IDMAC
1906	/* Handle DMA interrupts */
1907	pending = mci_readl(host, IDSTS);
1908	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1909		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1910		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1911		host->dma_ops->complete(host);
1912	}
1913#endif
1914
1915	return IRQ_HANDLED;
1916}
1917
1918static void dw_mci_work_routine_card(struct work_struct *work)
1919{
1920	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1921	int i;
1922
1923	for (i = 0; i < host->num_slots; i++) {
1924		struct dw_mci_slot *slot = host->slot[i];
1925		struct mmc_host *mmc = slot->mmc;
1926		struct mmc_request *mrq;
1927		int present;
1928
1929		present = dw_mci_get_cd(mmc);
1930		while (present != slot->last_detect_state) {
1931			dev_dbg(&slot->mmc->class_dev, "card %s\n",
1932				present ? "inserted" : "removed");
1933
1934			spin_lock_bh(&host->lock);
1935
1936			/* Card change detected */
1937			slot->last_detect_state = present;
1938
1939			/* Clean up queue if present */
1940			mrq = slot->mrq;
1941			if (mrq) {
1942				if (mrq == host->mrq) {
1943					host->data = NULL;
1944					host->cmd = NULL;
1945
1946					switch (host->state) {
1947					case STATE_IDLE:
1948						break;
1949					case STATE_SENDING_CMD:
1950						mrq->cmd->error = -ENOMEDIUM;
1951						if (!mrq->data)
1952							break;
1953						/* fall through */
1954					case STATE_SENDING_DATA:
1955						mrq->data->error = -ENOMEDIUM;
1956						dw_mci_stop_dma(host);
1957						break;
1958					case STATE_DATA_BUSY:
1959					case STATE_DATA_ERROR:
1960						if (mrq->data->error == -EINPROGRESS)
1961							mrq->data->error = -ENOMEDIUM;
1962						/* fall through */
1963					case STATE_SENDING_STOP:
1964						if (mrq->stop)
1965							mrq->stop->error = -ENOMEDIUM;
1966						break;
1967					}
1968
1969					dw_mci_request_end(host, mrq);
1970				} else {
1971					list_del(&slot->queue_node);
1972					mrq->cmd->error = -ENOMEDIUM;
1973					if (mrq->data)
1974						mrq->data->error = -ENOMEDIUM;
1975					if (mrq->stop)
1976						mrq->stop->error = -ENOMEDIUM;
1977
1978					spin_unlock(&host->lock);
1979					mmc_request_done(slot->mmc, mrq);
1980					spin_lock(&host->lock);
1981				}
1982			}
1983
1984			/* Power down slot */
1985			if (present == 0) {
1986				/* Clear down the FIFO */
1987				dw_mci_fifo_reset(host);
1988#ifdef CONFIG_MMC_DW_IDMAC
1989				dw_mci_idmac_reset(host);
1990#endif
1991
1992			}
1993
1994			spin_unlock_bh(&host->lock);
1995
1996			present = dw_mci_get_cd(mmc);
1997		}
1998
1999		mmc_detect_change(slot->mmc,
2000			msecs_to_jiffies(host->pdata->detect_delay_ms));
2001	}
2002}
2003
2004#ifdef CONFIG_OF
2005/* given a slot id, find out the device node representing that slot */
2006static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2007{
2008	struct device_node *np;
2009	const __be32 *addr;
2010	int len;
2011
2012	if (!dev || !dev->of_node)
2013		return NULL;
2014
2015	for_each_child_of_node(dev->of_node, np) {
2016		addr = of_get_property(np, "reg", &len);
2017		if (!addr || (len < sizeof(int)))
2018			continue;
2019		if (be32_to_cpup(addr) == slot)
2020			return np;
2021	}
2022	return NULL;
2023}
2024
2025static struct dw_mci_of_slot_quirks {
2026	char *quirk;
2027	int id;
2028} of_slot_quirks[] = {
2029	{
2030		.quirk	= "disable-wp",
2031		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2032	},
2033};
2034
2035static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2036{
2037	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2038	int quirks = 0;
2039	int idx;
2040
2041	/* get quirks */
2042	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2043		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2044			quirks |= of_slot_quirks[idx].id;
2045
2046	return quirks;
2047}
2048
2049/* find out bus-width for a given slot */
2050static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2051{
2052	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2053	u32 bus_wd = 1;
2054
2055	if (!np)
2056		return 1;
2057
2058	if (of_property_read_u32(np, "bus-width", &bus_wd))
2059		dev_err(dev, "bus-width property not found, assuming a"
2060			       " width of 1\n");
2061	return bus_wd;
2062}
2063
2064/* find the write protect gpio for a given slot; or -1 if none specified */
2065static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2066{
2067	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2068	int gpio;
2069
2070	if (!np)
2071		return -EINVAL;
2072
2073	gpio = of_get_named_gpio(np, "wp-gpios", 0);
2074
2075	/* A missing entry is valid; return silently */
2076	if (!gpio_is_valid(gpio))
2077		return -EINVAL;
2078
2079	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2080		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2081		return -EINVAL;
2082	}
2083
2084	return gpio;
2085}
2086
2087/* find the cd gpio for a given slot */
2088static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2089					struct mmc_host *mmc)
2090{
2091	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2092	int gpio;
2093
2094	if (!np)
2095		return;
2096
2097	gpio = of_get_named_gpio(np, "cd-gpios", 0);
2098
2099	/* Having a missing entry is valid; return silently */
2100	if (!gpio_is_valid(gpio))
2101		return;
2102
2103	if (mmc_gpio_request_cd(mmc, gpio, 0))
2104		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2105}
2106#else /* CONFIG_OF */
2107static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2108{
2109	return 0;
2110}
2111static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2112{
2113	return 1;
2114}
2115static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2116{
2117	return NULL;
2118}
2119static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2120{
2121	return -EINVAL;
2122}
2123static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2124					struct mmc_host *mmc)
2125{
2126	return;
2127}
2128#endif /* CONFIG_OF */
2129
2130static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2131{
2132	struct mmc_host *mmc;
2133	struct dw_mci_slot *slot;
2134	const struct dw_mci_drv_data *drv_data = host->drv_data;
2135	int ctrl_id, ret;
2136	u32 freq[2];
2137	u8 bus_width;
2138
2139	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2140	if (!mmc)
2141		return -ENOMEM;
2142
2143	slot = mmc_priv(mmc);
2144	slot->id = id;
2145	slot->mmc = mmc;
2146	slot->host = host;
2147	host->slot[id] = slot;
2148
2149	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2150
2151	mmc->ops = &dw_mci_ops;
2152	if (of_property_read_u32_array(host->dev->of_node,
2153				       "clock-freq-min-max", freq, 2)) {
2154		mmc->f_min = DW_MCI_FREQ_MIN;
2155		mmc->f_max = DW_MCI_FREQ_MAX;
2156	} else {
2157		mmc->f_min = freq[0];
2158		mmc->f_max = freq[1];
2159	}
2160
2161	if (host->pdata->get_ocr)
2162		mmc->ocr_avail = host->pdata->get_ocr(id);
2163	else
2164		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2165
2166	/*
2167	 * Start with slot power disabled, it will be enabled when a card
2168	 * is detected.
2169	 */
2170	if (host->pdata->setpower)
2171		host->pdata->setpower(id, 0);
2172
2173	if (host->pdata->caps)
2174		mmc->caps = host->pdata->caps;
2175
2176	if (host->pdata->pm_caps)
2177		mmc->pm_caps = host->pdata->pm_caps;
2178
2179	if (host->dev->of_node) {
2180		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2181		if (ctrl_id < 0)
2182			ctrl_id = 0;
2183	} else {
2184		ctrl_id = to_platform_device(host->dev)->id;
2185	}
2186	if (drv_data && drv_data->caps)
2187		mmc->caps |= drv_data->caps[ctrl_id];
2188
2189	if (host->pdata->caps2)
2190		mmc->caps2 = host->pdata->caps2;
2191
2192	if (host->pdata->get_bus_wd)
2193		bus_width = host->pdata->get_bus_wd(slot->id);
2194	else if (host->dev->of_node)
2195		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2196	else
2197		bus_width = 1;
2198
2199	switch (bus_width) {
2200	case 8:
2201		mmc->caps |= MMC_CAP_8_BIT_DATA;	/* fall through */
2202	case 4:
2203		mmc->caps |= MMC_CAP_4_BIT_DATA;
2204	}
2205
2206	if (host->pdata->blk_settings) {
2207		mmc->max_segs = host->pdata->blk_settings->max_segs;
2208		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2209		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2210		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2211		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2212	} else {
2213		/* Useful defaults if platform data is unset. */
2214#ifdef CONFIG_MMC_DW_IDMAC
2215		mmc->max_segs = host->ring_size;
2216		mmc->max_blk_size = 65536;
2217		mmc->max_blk_count = host->ring_size;
2218		mmc->max_seg_size = 0x1000;
2219		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2220#else
2221		mmc->max_segs = 64;
2222		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2223		mmc->max_blk_count = 512;
2224		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2225		mmc->max_seg_size = mmc->max_req_size;
2226#endif /* CONFIG_MMC_DW_IDMAC */
2227	}
2228
2229	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2230	dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
2231
2232	ret = mmc_add_host(mmc);
2233	if (ret)
2234		goto err_setup_bus;
2235
2236#if defined(CONFIG_DEBUG_FS)
2237	dw_mci_init_debugfs(slot);
2238#endif
2239
2240	/* Card initially undetected */
2241	slot->last_detect_state = 0;
2242
2243	return 0;
2244
2245err_setup_bus:
2246	mmc_free_host(mmc);
2247	return -EINVAL;
2248}
2249
2250static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2251{
2252	/* Shutdown detect IRQ */
2253	if (slot->host->pdata->exit)
2254		slot->host->pdata->exit(id);
2255
2256	/* Debugfs stuff is cleaned up by mmc core */
2257	mmc_remove_host(slot->mmc);
2258	slot->host->slot[id] = NULL;
2259	mmc_free_host(slot->mmc);
2260}
2261
2262static void dw_mci_init_dma(struct dw_mci *host)
2263{
2264	/* Alloc memory for sg translation */
2265	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2266					  &host->sg_dma, GFP_KERNEL);
2267	if (!host->sg_cpu) {
2268		dev_err(host->dev, "%s: could not alloc DMA memory\n",
2269			__func__);
2270		goto no_dma;
2271	}
2272
2273	/* Determine which DMA interface to use */
2274#ifdef CONFIG_MMC_DW_IDMAC
2275	host->dma_ops = &dw_mci_idmac_ops;
2276	dev_info(host->dev, "Using internal DMA controller.\n");
2277#endif
2278
2279	if (!host->dma_ops)
2280		goto no_dma;
2281
2282	if (host->dma_ops->init && host->dma_ops->start &&
2283	    host->dma_ops->stop && host->dma_ops->cleanup) {
2284		if (host->dma_ops->init(host)) {
2285			dev_err(host->dev, "%s: Unable to initialize "
2286				"DMA Controller.\n", __func__);
2287			goto no_dma;
2288		}
2289	} else {
2290		dev_err(host->dev, "DMA initialization not found.\n");
2291		goto no_dma;
2292	}
2293
2294	host->use_dma = 1;
2295	return;
2296
2297no_dma:
2298	dev_info(host->dev, "Using PIO mode.\n");
2299	host->use_dma = 0;
2300	return;
2301}
2302
2303static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2304{
2305	unsigned long timeout = jiffies + msecs_to_jiffies(500);
2306	u32 ctrl;
2307
2308	ctrl = mci_readl(host, CTRL);
2309	ctrl |= reset;
2310	mci_writel(host, CTRL, ctrl);
2311
2312	/* wait till resets clear */
2313	do {
2314		ctrl = mci_readl(host, CTRL);
2315		if (!(ctrl & reset))
2316			return true;
2317	} while (time_before(jiffies, timeout));
2318
2319	dev_err(host->dev,
2320		"Timeout resetting block (ctrl reset %#x)\n",
2321		ctrl & reset);
2322
2323	return false;
2324}
2325
2326static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2327{
2328	/*
2329	 * Resetting generates a block interrupt, hence we set
2330	 * the scatter-gather pointer to NULL first.
2331	 */
2332	if (host->sg) {
2333		sg_miter_stop(&host->sg_miter);
2334		host->sg = NULL;
2335	}
2336
2337	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2338}
2339
2340static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2341{
2342	return dw_mci_ctrl_reset(host,
2343				 SDMMC_CTRL_FIFO_RESET |
2344				 SDMMC_CTRL_RESET |
2345				 SDMMC_CTRL_DMA_RESET);
2346}
2347
2348#ifdef CONFIG_OF
2349static struct dw_mci_of_quirks {
2350	char *quirk;
2351	int id;
2352} of_quirks[] = {
2353	{
2354		.quirk	= "broken-cd",
2355		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2356	},
2357};
2358
2359static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2360{
2361	struct dw_mci_board *pdata;
2362	struct device *dev = host->dev;
2363	struct device_node *np = dev->of_node;
2364	const struct dw_mci_drv_data *drv_data = host->drv_data;
2365	int idx, ret;
2366	u32 clock_frequency;
2367
2368	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2369	if (!pdata) {
2370		dev_err(dev, "could not allocate memory for pdata\n");
2371		return ERR_PTR(-ENOMEM);
2372	}
2373
2374	/* find out number of slots supported */
2375	if (of_property_read_u32(dev->of_node, "num-slots",
2376				&pdata->num_slots)) {
2377		dev_info(dev, "num-slots property not found, "
2378				"assuming 1 slot is available\n");
2379		pdata->num_slots = 1;
2380	}
2381
2382	/* get quirks */
2383	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2384		if (of_get_property(np, of_quirks[idx].quirk, NULL))
2385			pdata->quirks |= of_quirks[idx].id;
2386
2387	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2388		dev_info(dev, "fifo-depth property not found, using "
2389				"the FIFOTH register value as default\n");
2390
2391	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2392
2393	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2394		pdata->bus_hz = clock_frequency;
2395
2396	if (drv_data && drv_data->parse_dt) {
2397		ret = drv_data->parse_dt(host);
2398		if (ret)
2399			return ERR_PTR(ret);
2400	}
2401
2402	if (of_find_property(np, "keep-power-in-suspend", NULL))
2403		pdata->pm_caps |= MMC_PM_KEEP_POWER;
2404
2405	if (of_find_property(np, "enable-sdio-wakeup", NULL))
2406		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2407
2408	if (of_find_property(np, "supports-highspeed", NULL))
2409		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2410
2411	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2412		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2413
2414	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2415		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2416
2417	if (of_get_property(np, "cd-inverted", NULL))
2418		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
2419
2420	return pdata;
2421}
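/*
 * Hypothetical device-tree fragment (illustration only; node name and
 * values are made up) exercising the properties parsed above:
 *
 *	mshc0: mmc@12200000 {
 *		num-slots = <1>;
 *		broken-cd;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *		supports-highspeed;
 *		caps2-mmc-hs200-1_8v;
 *	};
 */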
2422
2423#else /* CONFIG_OF */
2424static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2425{
2426	return ERR_PTR(-EINVAL);
2427}
2428#endif /* CONFIG_OF */
2429
2430int dw_mci_probe(struct dw_mci *host)
2431{
2432	const struct dw_mci_drv_data *drv_data = host->drv_data;
2433	int width, i, ret = 0;
2434	u32 fifo_size;
2435	int init_slots = 0;
2436
2437	if (!host->pdata) {
2438		host->pdata = dw_mci_parse_dt(host);
2439		if (IS_ERR(host->pdata)) {
2440			dev_err(host->dev, "platform data not available\n");
2441			return -EINVAL;
2442		}
2443	}
2444
2445	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2446		dev_err(host->dev,
2447			"Platform data must supply select_slot function\n");
2448		return -ENODEV;
2449	}
2450
2451	host->biu_clk = devm_clk_get(host->dev, "biu");
2452	if (IS_ERR(host->biu_clk)) {
2453		dev_dbg(host->dev, "biu clock not available\n");
2454	} else {
2455		ret = clk_prepare_enable(host->biu_clk);
2456		if (ret) {
2457			dev_err(host->dev, "failed to enable biu clock\n");
2458			return ret;
2459		}
2460	}
2461
2462	host->ciu_clk = devm_clk_get(host->dev, "ciu");
2463	if (IS_ERR(host->ciu_clk)) {
2464		dev_dbg(host->dev, "ciu clock not available\n");
2465		host->bus_hz = host->pdata->bus_hz;
2466	} else {
2467		ret = clk_prepare_enable(host->ciu_clk);
2468		if (ret) {
2469			dev_err(host->dev, "failed to enable ciu clock\n");
2470			goto err_clk_biu;
2471		}
2472
2473		if (host->pdata->bus_hz) {
2474			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2475			if (ret)
2476				dev_warn(host->dev,
2477					 "Unable to set bus rate to %uHz\n",
2478					 host->pdata->bus_hz);
2479		}
2480		host->bus_hz = clk_get_rate(host->ciu_clk);
2481	}
2482
2483	if (drv_data && drv_data->init) {
2484		ret = drv_data->init(host);
2485		if (ret) {
2486			dev_err(host->dev,
2487				"implementation specific init failed\n");
2488			goto err_clk_ciu;
2489		}
2490	}
2491
2492	if (drv_data && drv_data->setup_clock) {
2493		ret = drv_data->setup_clock(host);
2494		if (ret) {
2495			dev_err(host->dev,
2496				"implementation specific clock setup failed\n");
2497			goto err_clk_ciu;
2498		}
2499	}
2500
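	/*
	 * vmmc is optional: -EPROBE_DEFER must be propagated so the probe
	 * can be retried once the regulator shows up; any other error just
	 * means the host runs without a vmmc supply.
	 */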
2501	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2502	if (IS_ERR(host->vmmc)) {
2503		ret = PTR_ERR(host->vmmc);
2504		if (ret == -EPROBE_DEFER)
2505			goto err_clk_ciu;
2506
2507		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2508		host->vmmc = NULL;
2509	} else {
2510		ret = regulator_enable(host->vmmc);
2511		if (ret) {
2512			if (ret != -EPROBE_DEFER)
2513				dev_err(host->dev,
2514					"regulator_enable fail: %d\n", ret);
2515			goto err_clk_ciu;
2516		}
2517	}
2518
2519	if (!host->bus_hz) {
2520		dev_err(host->dev,
2521			"Platform data must supply bus speed\n");
2522		ret = -ENODEV;
2523		goto err_regulator;
2524	}
2525
2526	host->quirks = host->pdata->quirks;
2527
2528	spin_lock_init(&host->lock);
2529	INIT_LIST_HEAD(&host->queue);
2530
2531	/*
2532	 * Get the host data width - this assumes that HCON has been set with
2533	 * the correct values.
2534	 */
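	/*
	 * Encoding, per the branches below: 0 = 16-bit, 2 = 64-bit and
	 * 1 = 32-bit; any other value is reserved and treated as 32-bit.
	 */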
2535	i = (mci_readl(host, HCON) >> 7) & 0x7;
2536	if (!i) {
2537		host->push_data = dw_mci_push_data16;
2538		host->pull_data = dw_mci_pull_data16;
2539		width = 16;
2540		host->data_shift = 1;
2541	} else if (i == 2) {
2542		host->push_data = dw_mci_push_data64;
2543		host->pull_data = dw_mci_pull_data64;
2544		width = 64;
2545		host->data_shift = 3;
2546	} else {
2547		/* Check for a reserved value, and warn if it is */
2548		WARN((i != 1),
2549		     "HCON reports a reserved host data width!\n"
2550		     "Defaulting to 32-bit access.\n");
2551		host->push_data = dw_mci_push_data32;
2552		host->pull_data = dw_mci_pull_data32;
2553		width = 32;
2554		host->data_shift = 2;
2555	}
2556
2557	/* Reset all blocks */
2558	if (!dw_mci_ctrl_all_reset(host)) {
2559		ret = -ENODEV;
		goto err_regulator; /* unwind regulator/clocks instead of leaking them */
	}
2560
2561	host->dma_ops = host->pdata->dma_ops;
2562	dw_mci_init_dma(host);
2563
2564	/* Clear the interrupts for the host controller */
2565	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2566	mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2567
2568	/* Put in max timeout */
2569	mci_writel(host, TMOUT, 0xFFFFFFFF);
2570
2571	/*
2572	 * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
2573	 * TX mark = fifo_size / 2, DMA size = 8
2574	 */
2575	if (!host->pdata->fifo_depth) {
2576		/*
2577		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2578		 * have been overwritten by the bootloader, just like we're
2579		 * about to do, so if you know the value for your hardware, you
2580		 * should put it in the platform data.
2581		 */
2582		fifo_size = mci_readl(host, FIFOTH);
2583		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2584	} else {
2585		fifo_size = host->pdata->fifo_depth;
2586	}
2587	host->fifo_depth = fifo_size;
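	/*
	 * SDMMC_SET_FIFOTH(MSIZE, RX_WMark, TX_WMark): MSIZE index 0x2
	 * selects a DMA multiple-transaction burst size of 8, matching the
	 * "DMA size = 8" noted above.
	 */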
2588	host->fifoth_val =
2589		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2590	mci_writel(host, FIFOTH, host->fifoth_val);
2591
2592	/* disable clock to CIU */
2593	mci_writel(host, CLKENA, 0);
2594	mci_writel(host, CLKSRC, 0);
2595
2596	/*
2597	 * The 2.40a spec changed the offset of the DATA register, so check
2598	 * the version ID and set the data offset accordingly.
2599	 */
2600	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2601	dev_info(host->dev, "Version ID is %04x\n", host->verid);
2602
2603	if (host->verid < DW_MMC_240A)
2604		host->data_offset = DATA_OFFSET;
2605	else
2606		host->data_offset = DATA_240A_OFFSET;
2607
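	/*
	 * Request completion is handled in the tasklet; card insert/remove
	 * handling is deferred to a dedicated workqueue, since that path
	 * may sleep.
	 */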
2608	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2609	host->card_workqueue = alloc_workqueue("dw-mci-card",
2610			WQ_MEM_RECLAIM, 1);
2611	if (!host->card_workqueue) {
2612		ret = -ENOMEM;
2613		goto err_dmaunmap;
2614	}
2615	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2616	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2617			       host->irq_flags, "dw-mci", host);
2618	if (ret)
2619		goto err_workqueue;
2620
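	/* HCON[5:1] holds NUM_CARD_SLOTS - 1, hence the "+ 1" below */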
2621	if (host->pdata->num_slots)
2622		host->num_slots = host->pdata->num_slots;
2623	else
2624		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2625
2626	/*
2627	 * Enable interrupts for command done, data over, data empty, card
2628	 * detect, receive ready, and errors (transmit/receive timeout, CRC).
2629	 */
2630	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2631	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2632		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2633		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2634	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2635
2636	dev_info(host->dev,
2637		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
2638		 host->irq, width, fifo_size);
2640
2641	/* We need at least one slot to succeed */
2642	for (i = 0; i < host->num_slots; i++) {
2643		ret = dw_mci_init_slot(host, i);
2644		if (ret)
2645			dev_dbg(host->dev, "slot %d init failed\n", i);
2646		else
2647			init_slots++;
2648	}
2649
2650	if (init_slots) {
2651		dev_info(host->dev, "%d slots initialized\n", init_slots);
2652	} else {
2653		dev_dbg(host->dev, "attempted to initialize %d slots, but failed on all\n",
2654			host->num_slots);
2655		goto err_workqueue;
2656	}
2657
2658	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2659		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2660
2661	return 0;
2662
2663err_workqueue:
2664	destroy_workqueue(host->card_workqueue);
2665
2666err_dmaunmap:
2667	if (host->use_dma && host->dma_ops->exit)
2668		host->dma_ops->exit(host);
2669
2670err_regulator:
2671	if (host->vmmc)
2672		regulator_disable(host->vmmc);
2673
2674err_clk_ciu:
2675	if (!IS_ERR(host->ciu_clk))
2676		clk_disable_unprepare(host->ciu_clk);
2677
2678err_clk_biu:
2679	if (!IS_ERR(host->biu_clk))
2680		clk_disable_unprepare(host->biu_clk);
2681
2682	return ret;
2683}
2684EXPORT_SYMBOL(dw_mci_probe);
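
/*
 * Illustrative sketch (not part of the original file): a minimal caller,
 * loosely modeled on the platform glue drivers such as dw_mmc-pltfm.c.
 * The function name is assumed and resource handling is reduced to the
 * essentials.
 *
 *	static int my_dw_mci_pltfm_probe(struct platform_device *pdev)
 *	{
 *		struct dw_mci *host;
 *		struct resource *regs;
 *
 *		host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		host->irq = platform_get_irq(pdev, 0);
 *		if (host->irq < 0)
 *			return host->irq;
 *
 *		host->dev = &pdev->dev;
 *		host->irq_flags = 0;
 *		host->pdata = pdev->dev.platform_data;
 *
 *		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		host->regs = devm_ioremap_resource(&pdev->dev, regs);
 *		if (IS_ERR(host->regs))
 *			return PTR_ERR(host->regs);
 *
 *		platform_set_drvdata(pdev, host);
 *		return dw_mci_probe(host);
 *	}
 */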
2685
2686void dw_mci_remove(struct dw_mci *host)
2687{
2688	int i;
2689
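	/* Quiesce, then tear down in roughly the reverse order of probe */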
2690	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2691	mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2692
2693	for (i = 0; i < host->num_slots; i++) {
2694		dev_dbg(host->dev, "remove slot %d\n", i);
2695		if (host->slot[i])
2696			dw_mci_cleanup_slot(host->slot[i], i);
2697	}
2698
2699	/* disable clock to CIU */
2700	mci_writel(host, CLKENA, 0);
2701	mci_writel(host, CLKSRC, 0);
2702
2703	destroy_workqueue(host->card_workqueue);
2704
2705	if (host->use_dma && host->dma_ops->exit)
2706		host->dma_ops->exit(host);
2707
2708	if (host->vmmc)
2709		regulator_disable(host->vmmc);
2710
2711	if (!IS_ERR(host->ciu_clk))
2712		clk_disable_unprepare(host->ciu_clk);
2713
2714	if (!IS_ERR(host->biu_clk))
2715		clk_disable_unprepare(host->biu_clk);
2716}
2717EXPORT_SYMBOL(dw_mci_remove);
2718
2719
2720
2721#ifdef CONFIG_PM_SLEEP
2722/*
2723 * TODO: we should probably disable the clock to the card in the suspend path.
2724 */
2725int dw_mci_suspend(struct dw_mci *host)
2726{
2727	if (host->vmmc)
2728		regulator_disable(host->vmmc);
2729
2730	return 0;
2731}
2732EXPORT_SYMBOL(dw_mci_suspend);
2733
2734int dw_mci_resume(struct dw_mci *host)
2735{
2736	int i, ret;
2737
2738	if (host->vmmc) {
2739		ret = regulator_enable(host->vmmc);
2740		if (ret) {
2741			dev_err(host->dev,
2742				"failed to enable regulator: %d\n", ret);
2743			return ret;
2744		}
2745	}
2746
2747	if (!dw_mci_ctrl_all_reset(host))
2748		return -ENODEV;
2751
2752	if (host->use_dma && host->dma_ops->init)
2753		host->dma_ops->init(host);
2754
2755	/*
2756	 * Restore the initial value of the FIFOTH register
2757	 * and invalidate prev_blksz by resetting it to zero.
2758	 */
2759	mci_writel(host, FIFOTH, host->fifoth_val);
2760	host->prev_blksz = 0;
2761
2762	/* Put in max timeout */
2763	mci_writel(host, TMOUT, 0xFFFFFFFF);
2764
2765	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2766	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2767		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2768		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2769	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2770
2771	for (i = 0; i < host->num_slots; i++) {
2772		struct dw_mci_slot *slot = host->slot[i];
2773		if (!slot)
2774			continue;
2775		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2776			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2777			dw_mci_setup_bus(slot, true);
2778		}
2779	}
2780	return 0;
2781}
2782EXPORT_SYMBOL(dw_mci_resume);
2783#endif /* CONFIG_PM_SLEEP */
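
/*
 * Illustrative sketch (not in the original): glue drivers typically wire
 * the suspend/resume helpers above into dev_pm_ops; the names here are
 * assumptions.
 *
 *	static int my_dw_mci_pm_suspend(struct device *dev)
 *	{
 *		return dw_mci_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int my_dw_mci_pm_resume(struct device *dev)
 *	{
 *		return dw_mci_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(my_dw_mci_pmops,
 *				 my_dw_mci_pm_suspend, my_dw_mci_pm_resume);
 */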
2784
2785static int __init dw_mci_init(void)
2786{
2787	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
2788	return 0;
2789}
2790
2791static void __exit dw_mci_exit(void)
2792{
2793}
2794
2795module_init(dw_mci_init);
2796module_exit(dw_mci_exit);
2797
2798MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2799MODULE_AUTHOR("NXP Semiconductor VietNam");
2800MODULE_AUTHOR("Imagination Technologies Ltd");
2801MODULE_LICENSE("GPL v2");