   1/*
   2 * Synopsys DesignWare Multimedia Card Interface driver
   3 *  (Based on NXP driver for lpc 31xx)
   4 *
   5 * Copyright (C) 2009 NXP Semiconductors
   6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 */
  13
  14#include <linux/blkdev.h>
  15#include <linux/clk.h>
  16#include <linux/debugfs.h>
  17#include <linux/device.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/err.h>
  20#include <linux/init.h>
  21#include <linux/interrupt.h>
  22#include <linux/ioport.h>
  23#include <linux/module.h>
  24#include <linux/platform_device.h>
  25#include <linux/seq_file.h>
  26#include <linux/slab.h>
  27#include <linux/stat.h>
  28#include <linux/delay.h>
  29#include <linux/irq.h>
  30#include <linux/mmc/host.h>
  31#include <linux/mmc/mmc.h>
  32#include <linux/mmc/sdio.h>
  33#include <linux/mmc/dw_mmc.h>
  34#include <linux/bitops.h>
  35#include <linux/regulator/consumer.h>
  36#include <linux/workqueue.h>
  37#include <linux/of.h>
  38#include <linux/of_gpio.h>
  39#include <linux/mmc/slot-gpio.h>
  40
  41#include "dw_mmc.h"
  42
  43/* Common flag combinations */
  44#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
  45				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
  46				 SDMMC_INT_EBE)
  47#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
  48				 SDMMC_INT_RESP_ERR)
  49#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
  50				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
  51#define DW_MCI_SEND_STATUS	1
  52#define DW_MCI_RECV_STATUS	2
  53#define DW_MCI_DMA_THRESHOLD	16
  54
   55#define DW_MCI_FREQ_MAX	200000000	/* unit: Hz */
   56#define DW_MCI_FREQ_MIN	400000		/* unit: Hz */
  57
  58#ifdef CONFIG_MMC_DW_IDMAC
  59#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
  60				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
  61				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
  62				 SDMMC_IDMAC_INT_TI)
  63
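/*
 * Internal DMAC descriptor, used here in chained mode: des0 carries the
 * control/status bits defined below, des1 the buffer size, des2 the
 * buffer address, and (with IDMAC_DES0_CH set) des3 the address of the
 * next descriptor in the ring built by dw_mci_idmac_init().
 */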
  64struct idmac_desc {
  65	u32		des0;	/* Control Descriptor */
  66#define IDMAC_DES0_DIC	BIT(1)
  67#define IDMAC_DES0_LD	BIT(2)
  68#define IDMAC_DES0_FD	BIT(3)
  69#define IDMAC_DES0_CH	BIT(4)
  70#define IDMAC_DES0_ER	BIT(5)
  71#define IDMAC_DES0_CES	BIT(30)
  72#define IDMAC_DES0_OWN	BIT(31)
  73
  74	u32		des1;	/* Buffer sizes */
  75#define IDMAC_SET_BUFFER1_SIZE(d, s) \
  76	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
  77
  78	u32		des2;	/* buffer 1 physical address */
  79
  80	u32		des3;	/* buffer 2 physical address */
  81};
  82#endif /* CONFIG_MMC_DW_IDMAC */
  83
  84static const u8 tuning_blk_pattern_4bit[] = {
  85	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
  86	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
  87	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
  88	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
  89	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
  90	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
  91	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
  92	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
  93};
  94
  95static const u8 tuning_blk_pattern_8bit[] = {
  96	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
  97	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
  98	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
  99	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
 100	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
 101	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
 102	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
 103	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
 104	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
 105	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
 106	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
 107	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
 108	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
 109	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
 110	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
 111	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
 112};
 113
 114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
 115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
 116
 117#if defined(CONFIG_DEBUG_FS)
 118static int dw_mci_req_show(struct seq_file *s, void *v)
 119{
 120	struct dw_mci_slot *slot = s->private;
 121	struct mmc_request *mrq;
 122	struct mmc_command *cmd;
 123	struct mmc_command *stop;
 124	struct mmc_data	*data;
 125
 126	/* Make sure we get a consistent snapshot */
 127	spin_lock_bh(&slot->host->lock);
 128	mrq = slot->mrq;
 129
 130	if (mrq) {
 131		cmd = mrq->cmd;
 132		data = mrq->data;
 133		stop = mrq->stop;
 134
 135		if (cmd)
 136			seq_printf(s,
 137				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 138				   cmd->opcode, cmd->arg, cmd->flags,
 139				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
  140				   cmd->resp[3], cmd->error);
 141		if (data)
 142			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
 143				   data->bytes_xfered, data->blocks,
 144				   data->blksz, data->flags, data->error);
 145		if (stop)
 146			seq_printf(s,
 147				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 148				   stop->opcode, stop->arg, stop->flags,
 149				   stop->resp[0], stop->resp[1], stop->resp[2],
  150				   stop->resp[3], stop->error);
 151	}
 152
 153	spin_unlock_bh(&slot->host->lock);
 154
 155	return 0;
 156}
 157
 158static int dw_mci_req_open(struct inode *inode, struct file *file)
 159{
 160	return single_open(file, dw_mci_req_show, inode->i_private);
 161}
 162
 163static const struct file_operations dw_mci_req_fops = {
 164	.owner		= THIS_MODULE,
 165	.open		= dw_mci_req_open,
 166	.read		= seq_read,
 167	.llseek		= seq_lseek,
 168	.release	= single_release,
 169};
 170
  171static int dw_mci_regs_show(struct seq_file *s, void *v)
  172{
	struct dw_mci *host = s->private;

  173	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
  174	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
  175	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
  176	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
  177	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
  178	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
 179
 180	return 0;
 181}
 182
 183static int dw_mci_regs_open(struct inode *inode, struct file *file)
 184{
 185	return single_open(file, dw_mci_regs_show, inode->i_private);
 186}
 187
 188static const struct file_operations dw_mci_regs_fops = {
 189	.owner		= THIS_MODULE,
 190	.open		= dw_mci_regs_open,
 191	.read		= seq_read,
 192	.llseek		= seq_lseek,
 193	.release	= single_release,
 194};
 195
 196static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
 197{
 198	struct mmc_host	*mmc = slot->mmc;
 199	struct dw_mci *host = slot->host;
 200	struct dentry *root;
 201	struct dentry *node;
 202
 203	root = mmc->debugfs_root;
 204	if (!root)
 205		return;
 206
 207	node = debugfs_create_file("regs", S_IRUSR, root, host,
 208				   &dw_mci_regs_fops);
 209	if (!node)
 210		goto err;
 211
 212	node = debugfs_create_file("req", S_IRUSR, root, slot,
 213				   &dw_mci_req_fops);
 214	if (!node)
 215		goto err;
 216
 217	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
 218	if (!node)
 219		goto err;
 220
 221	node = debugfs_create_x32("pending_events", S_IRUSR, root,
 222				  (u32 *)&host->pending_events);
 223	if (!node)
 224		goto err;
 225
 226	node = debugfs_create_x32("completed_events", S_IRUSR, root,
 227				  (u32 *)&host->completed_events);
 228	if (!node)
 229		goto err;
 230
 231	return;
 232
 233err:
 234	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
 235}
 236#endif /* defined(CONFIG_DEBUG_FS) */
 237
 238static void dw_mci_set_timeout(struct dw_mci *host)
 239{
 240	/* timeout (maximum) */
 241	mci_writel(host, TMOUT, 0xffffffff);
 242}
 243
 244static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
 245{
 246	struct mmc_data	*data;
 247	struct dw_mci_slot *slot = mmc_priv(mmc);
 248	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
 249	u32 cmdr;
 250	cmd->error = -EINPROGRESS;
 251
 252	cmdr = cmd->opcode;
 253
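	/*
	 * Commands that stop a transfer are sent with the stop bit set:
	 * CMD12, CMD0, CMD15, and a CMD52 access to the CCCR I/O Abort
	 * register (the CMD52 argument carries the register address in
	 * bits [25:9], hence the shift and the 17-bit mask).
	 */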
 254	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
 255	    cmd->opcode == MMC_GO_IDLE_STATE ||
 256	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
 257	    (cmd->opcode == SD_IO_RW_DIRECT &&
 258	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
 259		cmdr |= SDMMC_CMD_STOP;
 260	else
 261		if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
 262			cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
 263
 264	if (cmd->flags & MMC_RSP_PRESENT) {
 265		/* We expect a response, so set this bit */
 266		cmdr |= SDMMC_CMD_RESP_EXP;
 267		if (cmd->flags & MMC_RSP_136)
 268			cmdr |= SDMMC_CMD_RESP_LONG;
 269	}
 270
 271	if (cmd->flags & MMC_RSP_CRC)
 272		cmdr |= SDMMC_CMD_RESP_CRC;
 273
 274	data = cmd->data;
 275	if (data) {
 276		cmdr |= SDMMC_CMD_DAT_EXP;
 277		if (data->flags & MMC_DATA_STREAM)
 278			cmdr |= SDMMC_CMD_STRM_MODE;
 279		if (data->flags & MMC_DATA_WRITE)
 280			cmdr |= SDMMC_CMD_DAT_WR;
 281	}
 282
 283	if (drv_data && drv_data->prepare_command)
 284		drv_data->prepare_command(slot->host, &cmdr);
 285
 286	return cmdr;
 287}
 288
 289static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
 290{
 291	struct mmc_command *stop;
 292	u32 cmdr;
 293
 294	if (!cmd->data)
 295		return 0;
 296
 297	stop = &host->stop_abort;
 298	cmdr = cmd->opcode;
 299	memset(stop, 0, sizeof(struct mmc_command));
 300
 301	if (cmdr == MMC_READ_SINGLE_BLOCK ||
 302	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
 303	    cmdr == MMC_WRITE_BLOCK ||
 304	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
 305		stop->opcode = MMC_STOP_TRANSMISSION;
 306		stop->arg = 0;
 307		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
 308	} else if (cmdr == SD_IO_RW_EXTENDED) {
 309		stop->opcode = SD_IO_RW_DIRECT;
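		/*
		 * CMD52 argument: write (bit 31) to function 0, register
		 * CCCR I/O Abort, with the data byte naming the function
		 * whose CMD53 transfer is being aborted.
		 */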
 310		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
 311			     ((cmd->arg >> 28) & 0x7);
 312		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
 313	} else {
 314		return 0;
 315	}
 316
 317	cmdr = stop->opcode | SDMMC_CMD_STOP |
 318		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
 319
 320	return cmdr;
 321}
 322
 323static void dw_mci_start_command(struct dw_mci *host,
 324				 struct mmc_command *cmd, u32 cmd_flags)
 325{
 326	host->cmd = cmd;
 327	dev_vdbg(host->dev,
 328		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
 329		 cmd->arg, cmd_flags);
 330
 331	mci_writel(host, CMDARG, cmd->arg);
 332	wmb();
 333
 334	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
 335}
 336
 337static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
 338{
 339	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
 340	dw_mci_start_command(host, stop, host->stop_cmdr);
 341}
 342
 343/* DMA interface functions */
 344static void dw_mci_stop_dma(struct dw_mci *host)
 345{
 346	if (host->using_dma) {
 347		host->dma_ops->stop(host);
 348		host->dma_ops->cleanup(host);
 349	}
 350
 351	/* Data transfer was stopped by the interrupt handler */
 352	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 353}
 354
 355static int dw_mci_get_dma_dir(struct mmc_data *data)
 356{
 357	if (data->flags & MMC_DATA_WRITE)
 358		return DMA_TO_DEVICE;
 359	else
 360		return DMA_FROM_DEVICE;
 361}
 362
 363#ifdef CONFIG_MMC_DW_IDMAC
 364static void dw_mci_dma_cleanup(struct dw_mci *host)
 365{
 366	struct mmc_data *data = host->data;
 367
 368	if (data)
 369		if (!data->host_cookie)
 370			dma_unmap_sg(host->dev,
 371				     data->sg,
 372				     data->sg_len,
 373				     dw_mci_get_dma_dir(data));
 374}
 375
 376static void dw_mci_idmac_reset(struct dw_mci *host)
 377{
 378	u32 bmod = mci_readl(host, BMOD);
 379	/* Software reset of DMA */
 380	bmod |= SDMMC_IDMAC_SWRESET;
 381	mci_writel(host, BMOD, bmod);
 382}
 383
 384static void dw_mci_idmac_stop_dma(struct dw_mci *host)
 385{
 386	u32 temp;
 387
 388	/* Disable and reset the IDMAC interface */
 389	temp = mci_readl(host, CTRL);
 390	temp &= ~SDMMC_CTRL_USE_IDMAC;
 391	temp |= SDMMC_CTRL_DMA_RESET;
 392	mci_writel(host, CTRL, temp);
 393
 394	/* Stop the IDMAC running */
 395	temp = mci_readl(host, BMOD);
 396	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
 397	temp |= SDMMC_IDMAC_SWRESET;
 398	mci_writel(host, BMOD, temp);
 399}
 400
 401static void dw_mci_idmac_complete_dma(struct dw_mci *host)
 402{
 403	struct mmc_data *data = host->data;
 404
 405	dev_vdbg(host->dev, "DMA complete\n");
 406
 407	host->dma_ops->cleanup(host);
 408
 409	/*
 410	 * If the card was removed, data will be NULL. No point in trying to
 411	 * send the stop command or waiting for NBUSY in this case.
 412	 */
 413	if (data) {
 414		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 415		tasklet_schedule(&host->tasklet);
 416	}
 417}
 418
 419static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
 420				    unsigned int sg_len)
 421{
 422	int i;
 423	struct idmac_desc *desc = host->sg_cpu;
 424
 425	for (i = 0; i < sg_len; i++, desc++) {
 426		unsigned int length = sg_dma_len(&data->sg[i]);
 427		u32 mem_addr = sg_dma_address(&data->sg[i]);
 428
 429		/* Set the OWN bit and disable interrupts for this descriptor */
 430		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
 431
 432		/* Buffer length */
 433		IDMAC_SET_BUFFER1_SIZE(desc, length);
 434
 435		/* Physical address to DMA to/from */
 436		desc->des2 = mem_addr;
 437	}
 438
 439	/* Set first descriptor */
 440	desc = host->sg_cpu;
 441	desc->des0 |= IDMAC_DES0_FD;
 442
 443	/* Set last descriptor */
 444	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
 445	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
 446	desc->des0 |= IDMAC_DES0_LD;
 447
 448	wmb();
 449}
 450
 451static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 452{
 453	u32 temp;
 454
 455	dw_mci_translate_sglist(host, host->data, sg_len);
 456
 457	/* Select IDMAC interface */
 458	temp = mci_readl(host, CTRL);
 459	temp |= SDMMC_CTRL_USE_IDMAC;
 460	mci_writel(host, CTRL, temp);
 461
 462	wmb();
 463
 464	/* Enable the IDMAC */
 465	temp = mci_readl(host, BMOD);
 466	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
 467	mci_writel(host, BMOD, temp);
 468
 469	/* Start it running */
 470	mci_writel(host, PLDMND, 1);
 471}
 472
 473static int dw_mci_idmac_init(struct dw_mci *host)
 474{
 475	struct idmac_desc *p;
 476	int i;
 477
 478	/* Number of descriptors in the ring buffer */
 479	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
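	/* (16 bytes per descriptor, so 256 descriptors with 4 KiB pages) */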
 480
 481	/* Forward link the descriptor list */
 482	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
 483		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
 484
 485	/* Set the last descriptor as the end-of-ring descriptor */
 486	p->des3 = host->sg_dma;
 487	p->des0 = IDMAC_DES0_ER;
 488
 489	dw_mci_idmac_reset(host);
 490
 491	/* Mask out interrupts - get Tx & Rx complete only */
 492	mci_writel(host, IDSTS, IDMAC_INT_CLR);
 493	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
 494		   SDMMC_IDMAC_INT_TI);
 495
 496	/* Set the descriptor base address */
 497	mci_writel(host, DBADDR, host->sg_dma);
 498	return 0;
 499}
 500
 501static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
 502	.init = dw_mci_idmac_init,
 503	.start = dw_mci_idmac_start_dma,
 504	.stop = dw_mci_idmac_stop_dma,
 505	.complete = dw_mci_idmac_complete_dma,
 506	.cleanup = dw_mci_dma_cleanup,
 507};
 508#endif /* CONFIG_MMC_DW_IDMAC */
 509
 510static int dw_mci_pre_dma_transfer(struct dw_mci *host,
 511				   struct mmc_data *data,
 512				   bool next)
 513{
 514	struct scatterlist *sg;
 515	unsigned int i, sg_len;
 516
 517	if (!next && data->host_cookie)
 518		return data->host_cookie;
 519
 520	/*
 521	 * We don't do DMA on "complex" transfers, i.e. with
 522	 * non-word-aligned buffers or lengths. Also, we don't bother
 523	 * with all the DMA setup overhead for short transfers.
 524	 */
 525	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
 526		return -EINVAL;
 527
 528	if (data->blksz & 3)
 529		return -EINVAL;
 530
 531	for_each_sg(data->sg, sg, data->sg_len, i) {
 532		if (sg->offset & 3 || sg->length & 3)
 533			return -EINVAL;
 534	}
 535
 536	sg_len = dma_map_sg(host->dev,
 537			    data->sg,
 538			    data->sg_len,
 539			    dw_mci_get_dma_dir(data));
 540	if (sg_len == 0)
 541		return -EINVAL;
 542
 543	if (next)
 544		data->host_cookie = sg_len;
 545
 546	return sg_len;
 547}
 548
 549static void dw_mci_pre_req(struct mmc_host *mmc,
 550			   struct mmc_request *mrq,
 551			   bool is_first_req)
 552{
 553	struct dw_mci_slot *slot = mmc_priv(mmc);
 554	struct mmc_data *data = mrq->data;
 555
 556	if (!slot->host->use_dma || !data)
 557		return;
 558
 559	if (data->host_cookie) {
 560		data->host_cookie = 0;
 561		return;
 562	}
 563
 564	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
 565		data->host_cookie = 0;
 566}
 567
 568static void dw_mci_post_req(struct mmc_host *mmc,
 569			    struct mmc_request *mrq,
 570			    int err)
 571{
 572	struct dw_mci_slot *slot = mmc_priv(mmc);
 573	struct mmc_data *data = mrq->data;
 574
 575	if (!slot->host->use_dma || !data)
 576		return;
 577
 578	if (data->host_cookie)
 579		dma_unmap_sg(slot->host->dev,
 580			     data->sg,
 581			     data->sg_len,
 582			     dw_mci_get_dma_dir(data));
 583	data->host_cookie = 0;
 584}
 585
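/*
 * Pick the largest burst size (MSIZE) that evenly divides both the
 * block size in FIFO words and the TX watermark distance, with
 * rx_wmark set to one less than that burst size.  A worked example,
 * assuming a hypothetical fifo_depth of 64 on a 32-bit FIFO with
 * blksz = 512: blksz_depth = 128, tx_wmark = 32, tx_wmark_invers = 32,
 * so the loop settles on mszs[4] = 32, i.e. msize = 4, rx_wmark = 31.
 */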
 586static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
 587{
 588#ifdef CONFIG_MMC_DW_IDMAC
 589	unsigned int blksz = data->blksz;
 590	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
 591	u32 fifo_width = 1 << host->data_shift;
 592	u32 blksz_depth = blksz / fifo_width, fifoth_val;
 593	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
  594	int idx = ARRAY_SIZE(mszs) - 1;
 595
 596	tx_wmark = (host->fifo_depth) / 2;
 597	tx_wmark_invers = host->fifo_depth - tx_wmark;
 598
  599	/*
  600	 * MSIZE is '1'
  601	 * if blksz is not a multiple of the FIFO width
  602	 */
 603	if (blksz % fifo_width) {
 604		msize = 0;
 605		rx_wmark = 1;
 606		goto done;
 607	}
 608
 609	do {
 610		if (!((blksz_depth % mszs[idx]) ||
 611		     (tx_wmark_invers % mszs[idx]))) {
 612			msize = idx;
 613			rx_wmark = mszs[idx] - 1;
 614			break;
 615		}
 616	} while (--idx > 0);
  617	/*
  618	 * If idx reaches '0', the loop exits without a match and the
  619	 * initial values (msize = 0, rx_wmark = 1) are used.
  620	 */
 621done:
 622	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
 623	mci_writel(host, FIFOTH, fifoth_val);
 624#endif
 625}
 626
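/*
 * Card read threshold (CDTHRCTL): have the CIU start a block read only
 * when at least thld_size bytes of FIFO space are free, which helps
 * avoid RX FIFO overruns at the higher HS200/SDR104 clock rates.
 */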
 627static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
 628{
 629	unsigned int blksz = data->blksz;
 630	u32 blksz_depth, fifo_depth;
 631	u16 thld_size;
 632
 633	WARN_ON(!(data->flags & MMC_DATA_READ));
 634
 635	if (host->timing != MMC_TIMING_MMC_HS200 &&
 636	    host->timing != MMC_TIMING_UHS_SDR104)
 637		goto disable;
 638
 639	blksz_depth = blksz / (1 << host->data_shift);
 640	fifo_depth = host->fifo_depth;
 641
 642	if (blksz_depth > fifo_depth)
 643		goto disable;
 644
  645	/*
  646	 * If (blksz_depth) >= (fifo_depth >> 1), it should be 'thld_size <= blksz'.
  647	 * If (blksz_depth) <  (fifo_depth >> 1), it should be 'thld_size = blksz'.
  648	 * Currently just choose blksz.
  649	 */
 650	thld_size = blksz;
 651	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
 652	return;
 653
 654disable:
 655	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
 656}
 657
 658static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
 659{
 660	int sg_len;
 661	u32 temp;
 662
 663	host->using_dma = 0;
 664
 665	/* If we don't have a channel, we can't do DMA */
 666	if (!host->use_dma)
 667		return -ENODEV;
 668
 669	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
 670	if (sg_len < 0) {
 671		host->dma_ops->stop(host);
 672		return sg_len;
 673	}
 674
 675	host->using_dma = 1;
 676
 677	dev_vdbg(host->dev,
 678		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
 679		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
 680		 sg_len);
 681
  682	/*
  683	 * Decide the MSIZE and RX/TX watermark.
  684	 * If the current block size is the same as the previous one,
  685	 * there is no need to update FIFOTH.
  686	 */
 687	if (host->prev_blksz != data->blksz)
 688		dw_mci_adjust_fifoth(host, data);
 689
 690	/* Enable the DMA interface */
 691	temp = mci_readl(host, CTRL);
 692	temp |= SDMMC_CTRL_DMA_ENABLE;
 693	mci_writel(host, CTRL, temp);
 694
 695	/* Disable RX/TX IRQs, let DMA handle it */
 696	temp = mci_readl(host, INTMASK);
 697	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
 698	mci_writel(host, INTMASK, temp);
 699
 700	host->dma_ops->start(host, sg_len);
 701
 702	return 0;
 703}
 704
 705static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
 706{
 707	u32 temp;
 708
 709	data->error = -EINPROGRESS;
 710
 711	WARN_ON(host->data);
 712	host->sg = NULL;
 713	host->data = data;
 714
 715	if (data->flags & MMC_DATA_READ) {
 716		host->dir_status = DW_MCI_RECV_STATUS;
 717		dw_mci_ctrl_rd_thld(host, data);
 718	} else {
 719		host->dir_status = DW_MCI_SEND_STATUS;
 720	}
 721
 722	if (dw_mci_submit_data_dma(host, data)) {
 723		int flags = SG_MITER_ATOMIC;
 724		if (host->data->flags & MMC_DATA_READ)
 725			flags |= SG_MITER_TO_SG;
 726		else
 727			flags |= SG_MITER_FROM_SG;
 728
 729		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 730		host->sg = data->sg;
 731		host->part_buf_start = 0;
 732		host->part_buf_count = 0;
 733
 734		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
 735		temp = mci_readl(host, INTMASK);
 736		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
 737		mci_writel(host, INTMASK, temp);
 738
 739		temp = mci_readl(host, CTRL);
 740		temp &= ~SDMMC_CTRL_DMA_ENABLE;
 741		mci_writel(host, CTRL, temp);
 742
  743		/*
  744		 * Use the initial fifoth_val for PIO mode.
  745		 * If the next request is transferred by DMA mode,
  746		 * prev_blksz must be invalidated so FIFOTH is recalculated.
  747		 */
 748		mci_writel(host, FIFOTH, host->fifoth_val);
 749		host->prev_blksz = 0;
 750	} else {
 751		/*
 752		 * Keep the current block size.
 753		 * It will be used to decide whether to update
 754		 * fifoth register next time.
 755		 */
 756		host->prev_blksz = data->blksz;
 757	}
 758}
 759
 760static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
 761{
 762	struct dw_mci *host = slot->host;
 763	unsigned long timeout = jiffies + msecs_to_jiffies(500);
 764	unsigned int cmd_status = 0;
 765
 766	mci_writel(host, CMDARG, arg);
 767	wmb();
 768	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
 769
 770	while (time_before(jiffies, timeout)) {
 771		cmd_status = mci_readl(host, CMD);
 772		if (!(cmd_status & SDMMC_CMD_START))
 773			return;
 774	}
 775	dev_err(&slot->mmc->class_dev,
 776		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
 777		cmd, arg, cmd_status);
 778}
 779
 780static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 781{
 782	struct dw_mci *host = slot->host;
 783	unsigned int clock = slot->clock;
 784	u32 div;
 785	u32 clk_en_a;
 786
 787	if (!clock) {
 788		mci_writel(host, CLKENA, 0);
 789		mci_send_cmd(slot,
 790			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 791	} else if (clock != host->current_speed || force_clkinit) {
 792		div = host->bus_hz / clock;
 793		if (host->bus_hz % clock && host->bus_hz > clock)
 794			/*
 795			 * move the + 1 after the divide to prevent
 796			 * over-clocking the card.
 797			 */
 798			div += 1;
 799
 800		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
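		/*
		 * CLKDIV holds half the divider: the card clock comes out
		 * at bus_hz / (2 * div), or bus_hz itself when div == 0.
		 * A worked example, assuming a hypothetical bus_hz of
		 * 100 MHz and a requested 400 kHz: 100000000 / 400000 =
		 * 250, halved to div = 125, i.e. exactly 400 kHz.
		 */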
 801
 802		if ((clock << div) != slot->__clk_old || force_clkinit)
 803			dev_info(&slot->mmc->class_dev,
  804				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
 805				 slot->id, host->bus_hz, clock,
 806				 div ? ((host->bus_hz / div) >> 1) :
 807				 host->bus_hz, div);
 808
 809		/* disable clock */
 810		mci_writel(host, CLKENA, 0);
 811		mci_writel(host, CLKSRC, 0);
 812
 813		/* inform CIU */
 814		mci_send_cmd(slot,
 815			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 816
 817		/* set clock to desired speed */
 818		mci_writel(host, CLKDIV, div);
 819
 820		/* inform CIU */
 821		mci_send_cmd(slot,
 822			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 823
 824		/* enable clock; only low power if no SDIO */
 825		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
 826		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
 827			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
 828		mci_writel(host, CLKENA, clk_en_a);
 829
 830		/* inform CIU */
 831		mci_send_cmd(slot,
 832			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 833
  834		/* remember the clock value with the divider applied */
 835		slot->__clk_old = clock << div;
 836	}
 837
 838	host->current_speed = clock;
 839
 840	/* Set the current slot bus width */
 841	mci_writel(host, CTYPE, (slot->ctype << slot->id));
 842}
 843
 844static void __dw_mci_start_request(struct dw_mci *host,
 845				   struct dw_mci_slot *slot,
 846				   struct mmc_command *cmd)
 847{
 848	struct mmc_request *mrq;
 849	struct mmc_data	*data;
 850	u32 cmdflags;
 851
 852	mrq = slot->mrq;
 853	if (host->pdata->select_slot)
 854		host->pdata->select_slot(slot->id);
 855
 856	host->cur_slot = slot;
 857	host->mrq = mrq;
 858
 859	host->pending_events = 0;
 860	host->completed_events = 0;
 861	host->cmd_status = 0;
 862	host->data_status = 0;
 863	host->dir_status = 0;
 864
 865	data = cmd->data;
 866	if (data) {
 867		dw_mci_set_timeout(host);
 868		mci_writel(host, BYTCNT, data->blksz*data->blocks);
 869		mci_writel(host, BLKSIZ, data->blksz);
 870	}
 871
 872	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
 873
 874	/* this is the first command, send the initialization clock */
 875	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
 876		cmdflags |= SDMMC_CMD_INIT;
 877
 878	if (data) {
 879		dw_mci_submit_data(host, data);
 880		wmb();
 881	}
 882
 883	dw_mci_start_command(host, cmd, cmdflags);
 884
 885	if (mrq->stop)
 886		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
 887	else
 888		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
 889}
 890
 891static void dw_mci_start_request(struct dw_mci *host,
 892				 struct dw_mci_slot *slot)
 893{
 894	struct mmc_request *mrq = slot->mrq;
 895	struct mmc_command *cmd;
 896
 897	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
 898	__dw_mci_start_request(host, slot, cmd);
 899}
 900
 901/* must be called with host->lock held */
 902static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
 903				 struct mmc_request *mrq)
 904{
 905	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
 906		 host->state);
 907
 908	slot->mrq = mrq;
 909
 910	if (host->state == STATE_IDLE) {
 911		host->state = STATE_SENDING_CMD;
 912		dw_mci_start_request(host, slot);
 913	} else {
 914		list_add_tail(&slot->queue_node, &host->queue);
 915	}
 916}
 917
 918static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 919{
 920	struct dw_mci_slot *slot = mmc_priv(mmc);
 921	struct dw_mci *host = slot->host;
 922
 923	WARN_ON(slot->mrq);
 924
 925	/*
 926	 * The check for card presence and queueing of the request must be
 927	 * atomic, otherwise the card could be removed in between and the
 928	 * request wouldn't fail until another card was inserted.
 929	 */
 930	spin_lock_bh(&host->lock);
 931
 932	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
 933		spin_unlock_bh(&host->lock);
 934		mrq->cmd->error = -ENOMEDIUM;
 935		mmc_request_done(mmc, mrq);
 936		return;
 937	}
 938
 939	dw_mci_queue_request(host, slot, mrq);
 940
 941	spin_unlock_bh(&host->lock);
 942}
 943
 944static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 945{
 946	struct dw_mci_slot *slot = mmc_priv(mmc);
 947	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
 948	u32 regs;
 949
 950	switch (ios->bus_width) {
 951	case MMC_BUS_WIDTH_4:
 952		slot->ctype = SDMMC_CTYPE_4BIT;
 953		break;
 954	case MMC_BUS_WIDTH_8:
 955		slot->ctype = SDMMC_CTYPE_8BIT;
 956		break;
 957	default:
 958		/* set default 1 bit mode */
 959		slot->ctype = SDMMC_CTYPE_1BIT;
 960	}
 961
 962	regs = mci_readl(slot->host, UHS_REG);
 963
 964	/* DDR mode set */
 965	if (ios->timing == MMC_TIMING_UHS_DDR50)
 966		regs |= ((0x1 << slot->id) << 16);
 967	else
 968		regs &= ~((0x1 << slot->id) << 16);
 969
 970	mci_writel(slot->host, UHS_REG, regs);
 971	slot->host->timing = ios->timing;
 972
 973	/*
 974	 * Use mirror of ios->clock to prevent race with mmc
 975	 * core ios update when finding the minimum.
 976	 */
 977	slot->clock = ios->clock;
 978
 979	if (drv_data && drv_data->set_ios)
 980		drv_data->set_ios(slot->host, ios);
 981
 982	/* Slot specific timing and width adjustment */
 983	dw_mci_setup_bus(slot, false);
 984
 985	switch (ios->power_mode) {
 986	case MMC_POWER_UP:
 987		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
 988		/* Power up slot */
 989		if (slot->host->pdata->setpower)
 990			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
 991		regs = mci_readl(slot->host, PWREN);
 992		regs |= (1 << slot->id);
 993		mci_writel(slot->host, PWREN, regs);
 994		break;
 995	case MMC_POWER_OFF:
 996		/* Power down slot */
 997		if (slot->host->pdata->setpower)
 998			slot->host->pdata->setpower(slot->id, 0);
 999		regs = mci_readl(slot->host, PWREN);
1000		regs &= ~(1 << slot->id);
1001		mci_writel(slot->host, PWREN, regs);
1002		break;
1003	default:
1004		break;
1005	}
1006}
1007
1008static int dw_mci_get_ro(struct mmc_host *mmc)
1009{
1010	int read_only;
1011	struct dw_mci_slot *slot = mmc_priv(mmc);
1012	struct dw_mci_board *brd = slot->host->pdata;
1013
 1014	/* Use platform get_ro function, else try on-board write protect */
1015	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1016		read_only = 0;
1017	else if (brd->get_ro)
1018		read_only = brd->get_ro(slot->id);
1019	else if (gpio_is_valid(slot->wp_gpio))
1020		read_only = gpio_get_value(slot->wp_gpio);
1021	else
1022		read_only =
1023			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1024
1025	dev_dbg(&mmc->class_dev, "card is %s\n",
1026		read_only ? "read-only" : "read-write");
1027
1028	return read_only;
1029}
1030
1031static int dw_mci_get_cd(struct mmc_host *mmc)
1032{
1033	int present;
1034	struct dw_mci_slot *slot = mmc_priv(mmc);
1035	struct dw_mci_board *brd = slot->host->pdata;
1036	struct dw_mci *host = slot->host;
1037	int gpio_cd = mmc_gpio_get_cd(mmc);
1038
1039	/* Use platform get_cd function, else try onboard card detect */
1040	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1041		present = 1;
1042	else if (brd->get_cd)
1043		present = !brd->get_cd(slot->id);
1044	else if (!IS_ERR_VALUE(gpio_cd))
1045		present = gpio_cd;
1046	else
1047		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1048			== 0 ? 1 : 0;
1049
1050	spin_lock_bh(&host->lock);
1051	if (present) {
1052		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1053		dev_dbg(&mmc->class_dev, "card is present\n");
1054	} else {
1055		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1056		dev_dbg(&mmc->class_dev, "card is not present\n");
1057	}
1058	spin_unlock_bh(&host->lock);
1059
1060	return present;
1061}
1062
1063/*
 1064 * Disable low power mode.
1065 *
1066 * Low power mode will stop the card clock when idle.  According to the
1067 * description of the CLKENA register we should disable low power mode
1068 * for SDIO cards if we need SDIO interrupts to work.
1069 *
1070 * This function is fast if low power mode is already disabled.
1071 */
1072static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1073{
1074	struct dw_mci *host = slot->host;
1075	u32 clk_en_a;
1076	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1077
1078	clk_en_a = mci_readl(host, CLKENA);
1079
1080	if (clk_en_a & clken_low_pwr) {
1081		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1082		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1083			     SDMMC_CMD_PRV_DAT_WAIT, 0);
1084	}
1085}
1086
1087static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1088{
1089	struct dw_mci_slot *slot = mmc_priv(mmc);
1090	struct dw_mci *host = slot->host;
1091	u32 int_mask;
1092
1093	/* Enable/disable Slot Specific SDIO interrupt */
1094	int_mask = mci_readl(host, INTMASK);
1095	if (enb) {
1096		/*
1097		 * Turn off low power mode if it was enabled.  This is a bit of
1098		 * a heavy operation and we disable / enable IRQs a lot, so
1099		 * we'll leave low power mode disabled and it will get
1100		 * re-enabled again in dw_mci_setup_bus().
1101		 */
1102		dw_mci_disable_low_power(slot);
1103
1104		mci_writel(host, INTMASK,
1105			   (int_mask | SDMMC_INT_SDIO(slot->id)));
1106	} else {
1107		mci_writel(host, INTMASK,
1108			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1109	}
1110}
1111
1112static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1113{
1114	struct dw_mci_slot *slot = mmc_priv(mmc);
1115	struct dw_mci *host = slot->host;
1116	const struct dw_mci_drv_data *drv_data = host->drv_data;
1117	struct dw_mci_tuning_data tuning_data;
1118	int err = -ENOSYS;
1119
1120	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1121		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1122			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1123			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1124		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1125			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1126			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1127		} else {
1128			return -EINVAL;
1129		}
1130	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
1131		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1132		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1133	} else {
1134		dev_err(host->dev,
1135			"Undefined command(%d) for tuning\n", opcode);
1136		return -EINVAL;
1137	}
1138
1139	if (drv_data && drv_data->execute_tuning)
1140		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1141	return err;
1142}
1143
1144static const struct mmc_host_ops dw_mci_ops = {
1145	.request		= dw_mci_request,
1146	.pre_req		= dw_mci_pre_req,
1147	.post_req		= dw_mci_post_req,
1148	.set_ios		= dw_mci_set_ios,
1149	.get_ro			= dw_mci_get_ro,
1150	.get_cd			= dw_mci_get_cd,
1151	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
1152	.execute_tuning		= dw_mci_execute_tuning,
1153};
1154
1155static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1156	__releases(&host->lock)
1157	__acquires(&host->lock)
1158{
1159	struct dw_mci_slot *slot;
1160	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
1161
1162	WARN_ON(host->cmd || host->data);
1163
1164	host->cur_slot->mrq = NULL;
1165	host->mrq = NULL;
1166	if (!list_empty(&host->queue)) {
1167		slot = list_entry(host->queue.next,
1168				  struct dw_mci_slot, queue_node);
1169		list_del(&slot->queue_node);
1170		dev_vdbg(host->dev, "list not empty: %s is next\n",
1171			 mmc_hostname(slot->mmc));
1172		host->state = STATE_SENDING_CMD;
1173		dw_mci_start_request(host, slot);
1174	} else {
1175		dev_vdbg(host->dev, "list empty\n");
1176		host->state = STATE_IDLE;
1177	}
1178
1179	spin_unlock(&host->lock);
1180	mmc_request_done(prev_mmc, mrq);
1181	spin_lock(&host->lock);
1182}
1183
1184static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1185{
1186	u32 status = host->cmd_status;
1187
1188	host->cmd_status = 0;
1189
1190	/* Read the response from the card (up to 16 bytes) */
1191	if (cmd->flags & MMC_RSP_PRESENT) {
1192		if (cmd->flags & MMC_RSP_136) {
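			/* Long (R2) responses arrive LSW first: RESP0 holds bits [31:0] */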
1193			cmd->resp[3] = mci_readl(host, RESP0);
1194			cmd->resp[2] = mci_readl(host, RESP1);
1195			cmd->resp[1] = mci_readl(host, RESP2);
1196			cmd->resp[0] = mci_readl(host, RESP3);
1197		} else {
1198			cmd->resp[0] = mci_readl(host, RESP0);
1199			cmd->resp[1] = 0;
1200			cmd->resp[2] = 0;
1201			cmd->resp[3] = 0;
1202		}
1203	}
1204
1205	if (status & SDMMC_INT_RTO)
1206		cmd->error = -ETIMEDOUT;
1207	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1208		cmd->error = -EILSEQ;
1209	else if (status & SDMMC_INT_RESP_ERR)
1210		cmd->error = -EIO;
1211	else
1212		cmd->error = 0;
1213
1214	if (cmd->error) {
1215		/* newer ip versions need a delay between retries */
1216		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1217			mdelay(20);
1218	}
1219
1220	return cmd->error;
1221}
1222
1223static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1224{
1225	u32 status = host->data_status;
1226
1227	if (status & DW_MCI_DATA_ERROR_FLAGS) {
1228		if (status & SDMMC_INT_DRTO) {
1229			data->error = -ETIMEDOUT;
1230		} else if (status & SDMMC_INT_DCRC) {
1231			data->error = -EILSEQ;
1232		} else if (status & SDMMC_INT_EBE) {
1233			if (host->dir_status ==
1234				DW_MCI_SEND_STATUS) {
1235				/*
1236				 * No data CRC status was returned.
1237				 * The number of bytes transferred
1238				 * will be exaggerated in PIO mode.
1239				 */
1240				data->bytes_xfered = 0;
1241				data->error = -ETIMEDOUT;
1242			} else if (host->dir_status ==
1243					DW_MCI_RECV_STATUS) {
1244				data->error = -EIO;
1245			}
1246		} else {
1247			/* SDMMC_INT_SBE is included */
1248			data->error = -EIO;
1249		}
1250
1251		dev_err(host->dev, "data error, status 0x%08x\n", status);
1252
1253		/*
1254		 * After an error, there may be data lingering
1255		 * in the FIFO
1256		 */
1257		dw_mci_fifo_reset(host);
1258	} else {
1259		data->bytes_xfered = data->blocks * data->blksz;
1260		data->error = 0;
1261	}
1262
1263	return data->error;
1264}
1265
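/*
 * Request state machine, driven from the interrupt handler through
 * host->pending_events.  A sketch of the usual flow for a data command:
 *
 *   STATE_SENDING_CMD -> STATE_SENDING_DATA -> STATE_DATA_BUSY
 *     -> STATE_SENDING_STOP (only for open-ended or failed transfers)
 *     -> STATE_IDLE via dw_mci_request_end()
 *
 * EVENT_DATA_ERROR diverts to STATE_DATA_ERROR, which waits for
 * EVENT_XFER_COMPLETE before rejoining at STATE_DATA_BUSY.
 */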
1266static void dw_mci_tasklet_func(unsigned long priv)
1267{
1268	struct dw_mci *host = (struct dw_mci *)priv;
1269	struct mmc_data	*data;
1270	struct mmc_command *cmd;
1271	struct mmc_request *mrq;
1272	enum dw_mci_state state;
1273	enum dw_mci_state prev_state;
1274	unsigned int err;
1275
1276	spin_lock(&host->lock);
1277
1278	state = host->state;
1279	data = host->data;
1280	mrq = host->mrq;
1281
1282	do {
1283		prev_state = state;
1284
1285		switch (state) {
1286		case STATE_IDLE:
1287			break;
1288
1289		case STATE_SENDING_CMD:
1290			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1291						&host->pending_events))
1292				break;
1293
1294			cmd = host->cmd;
1295			host->cmd = NULL;
1296			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1297			err = dw_mci_command_complete(host, cmd);
1298			if (cmd == mrq->sbc && !err) {
1299				prev_state = state = STATE_SENDING_CMD;
1300				__dw_mci_start_request(host, host->cur_slot,
1301						       mrq->cmd);
1302				goto unlock;
1303			}
1304
1305			if (cmd->data && err) {
1306				dw_mci_stop_dma(host);
1307				send_stop_abort(host, data);
1308				state = STATE_SENDING_STOP;
1309				break;
1310			}
1311
1312			if (!cmd->data || err) {
1313				dw_mci_request_end(host, mrq);
1314				goto unlock;
1315			}
1316
1317			prev_state = state = STATE_SENDING_DATA;
1318			/* fall through */
1319
1320		case STATE_SENDING_DATA:
1321			if (test_and_clear_bit(EVENT_DATA_ERROR,
1322					       &host->pending_events)) {
1323				dw_mci_stop_dma(host);
1324				send_stop_abort(host, data);
1325				state = STATE_DATA_ERROR;
1326				break;
1327			}
1328
1329			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1330						&host->pending_events))
1331				break;
1332
1333			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1334			prev_state = state = STATE_DATA_BUSY;
1335			/* fall through */
1336
1337		case STATE_DATA_BUSY:
1338			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1339						&host->pending_events))
1340				break;
1341
1342			host->data = NULL;
1343			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1344			err = dw_mci_data_complete(host, data);
1345
1346			if (!err) {
1347				if (!data->stop || mrq->sbc) {
1348					if (mrq->sbc && data->stop)
1349						data->stop->error = 0;
1350					dw_mci_request_end(host, mrq);
1351					goto unlock;
1352				}
1353
1354				/* stop command for open-ended transfer*/
1355				if (data->stop)
1356					send_stop_abort(host, data);
1357			}
1358
 1359			/*
 1360			 * If err is non-zero, the stop/abort
 1361			 * command has already been issued.
 1362			 */
1363			prev_state = state = STATE_SENDING_STOP;
1364
1365			/* fall through */
1366
1367		case STATE_SENDING_STOP:
1368			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1369						&host->pending_events))
1370				break;
1371
1372			/* CMD error in data command */
1373			if (mrq->cmd->error && mrq->data)
1374				dw_mci_fifo_reset(host);
1375
1376			host->cmd = NULL;
1377			host->data = NULL;
1378
1379			if (mrq->stop)
1380				dw_mci_command_complete(host, mrq->stop);
1381			else
1382				host->cmd_status = 0;
1383
1384			dw_mci_request_end(host, mrq);
1385			goto unlock;
1386
1387		case STATE_DATA_ERROR:
1388			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1389						&host->pending_events))
1390				break;
1391
1392			state = STATE_DATA_BUSY;
1393			break;
1394		}
1395	} while (state != prev_state);
1396
1397	host->state = state;
1398unlock:
1399	spin_unlock(&host->lock);
1400
1401}
1402
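/*
 * PIO helpers.  The data FIFO is accessed one full word (1 <<
 * host->data_shift bytes) at a time, so scatterlist entries that are
 * not a multiple of the FIFO width stage their leftover bytes in
 * host->part_buf between calls.
 */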
1403/* push final bytes to part_buf, only use during push */
1404static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1405{
1406	memcpy((void *)&host->part_buf, buf, cnt);
1407	host->part_buf_count = cnt;
1408}
1409
1410/* append bytes to part_buf, only use during push */
1411static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1412{
1413	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1414	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1415	host->part_buf_count += cnt;
1416	return cnt;
1417}
1418
1419/* pull first bytes from part_buf, only use during pull */
1420static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1421{
1422	cnt = min(cnt, (int)host->part_buf_count);
1423	if (cnt) {
1424		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1425		       cnt);
1426		host->part_buf_count -= cnt;
1427		host->part_buf_start += cnt;
1428	}
1429	return cnt;
1430}
1431
1432/* pull final bytes from the part_buf, assuming it's just been filled */
1433static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1434{
1435	memcpy(buf, &host->part_buf, cnt);
1436	host->part_buf_start = cnt;
1437	host->part_buf_count = (1 << host->data_shift) - cnt;
1438}
1439
1440static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1441{
1442	struct mmc_data *data = host->data;
1443	int init_cnt = cnt;
1444
1445	/* try and push anything in the part_buf */
1446	if (unlikely(host->part_buf_count)) {
1447		int len = dw_mci_push_part_bytes(host, buf, cnt);
1448		buf += len;
1449		cnt -= len;
1450		if (host->part_buf_count == 2) {
1451			mci_writew(host, DATA(host->data_offset),
1452					host->part_buf16);
1453			host->part_buf_count = 0;
1454		}
1455	}
1456#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1457	if (unlikely((unsigned long)buf & 0x1)) {
1458		while (cnt >= 2) {
1459			u16 aligned_buf[64];
1460			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1461			int items = len >> 1;
1462			int i;
1463			/* memcpy from input buffer into aligned buffer */
1464			memcpy(aligned_buf, buf, len);
1465			buf += len;
1466			cnt -= len;
1467			/* push data from aligned buffer into fifo */
1468			for (i = 0; i < items; ++i)
1469				mci_writew(host, DATA(host->data_offset),
1470						aligned_buf[i]);
1471		}
1472	} else
1473#endif
1474	{
1475		u16 *pdata = buf;
1476		for (; cnt >= 2; cnt -= 2)
1477			mci_writew(host, DATA(host->data_offset), *pdata++);
1478		buf = pdata;
1479	}
1480	/* put anything remaining in the part_buf */
1481	if (cnt) {
1482		dw_mci_set_part_bytes(host, buf, cnt);
1483		 /* Push data if we have reached the expected data length */
1484		if ((data->bytes_xfered + init_cnt) ==
1485		    (data->blksz * data->blocks))
1486			mci_writew(host, DATA(host->data_offset),
1487				   host->part_buf16);
1488	}
1489}
1490
1491static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1492{
1493#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1494	if (unlikely((unsigned long)buf & 0x1)) {
1495		while (cnt >= 2) {
1496			/* pull data from fifo into aligned buffer */
1497			u16 aligned_buf[64];
1498			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1499			int items = len >> 1;
1500			int i;
1501			for (i = 0; i < items; ++i)
1502				aligned_buf[i] = mci_readw(host,
1503						DATA(host->data_offset));
1504			/* memcpy from aligned buffer into output buffer */
1505			memcpy(buf, aligned_buf, len);
1506			buf += len;
1507			cnt -= len;
1508		}
1509	} else
1510#endif
1511	{
1512		u16 *pdata = buf;
1513		for (; cnt >= 2; cnt -= 2)
1514			*pdata++ = mci_readw(host, DATA(host->data_offset));
1515		buf = pdata;
1516	}
1517	if (cnt) {
1518		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1519		dw_mci_pull_final_bytes(host, buf, cnt);
1520	}
1521}
1522
1523static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1524{
1525	struct mmc_data *data = host->data;
1526	int init_cnt = cnt;
1527
1528	/* try and push anything in the part_buf */
1529	if (unlikely(host->part_buf_count)) {
1530		int len = dw_mci_push_part_bytes(host, buf, cnt);
1531		buf += len;
1532		cnt -= len;
1533		if (host->part_buf_count == 4) {
1534			mci_writel(host, DATA(host->data_offset),
1535					host->part_buf32);
1536			host->part_buf_count = 0;
1537		}
1538	}
1539#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1540	if (unlikely((unsigned long)buf & 0x3)) {
1541		while (cnt >= 4) {
1542			u32 aligned_buf[32];
1543			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1544			int items = len >> 2;
1545			int i;
1546			/* memcpy from input buffer into aligned buffer */
1547			memcpy(aligned_buf, buf, len);
1548			buf += len;
1549			cnt -= len;
1550			/* push data from aligned buffer into fifo */
1551			for (i = 0; i < items; ++i)
1552				mci_writel(host, DATA(host->data_offset),
1553						aligned_buf[i]);
1554		}
1555	} else
1556#endif
1557	{
1558		u32 *pdata = buf;
1559		for (; cnt >= 4; cnt -= 4)
1560			mci_writel(host, DATA(host->data_offset), *pdata++);
1561		buf = pdata;
1562	}
1563	/* put anything remaining in the part_buf */
1564	if (cnt) {
1565		dw_mci_set_part_bytes(host, buf, cnt);
1566		 /* Push data if we have reached the expected data length */
1567		if ((data->bytes_xfered + init_cnt) ==
1568		    (data->blksz * data->blocks))
1569			mci_writel(host, DATA(host->data_offset),
1570				   host->part_buf32);
1571	}
1572}
1573
1574static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1575{
1576#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1577	if (unlikely((unsigned long)buf & 0x3)) {
1578		while (cnt >= 4) {
1579			/* pull data from fifo into aligned buffer */
1580			u32 aligned_buf[32];
1581			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1582			int items = len >> 2;
1583			int i;
1584			for (i = 0; i < items; ++i)
1585				aligned_buf[i] = mci_readl(host,
1586						DATA(host->data_offset));
1587			/* memcpy from aligned buffer into output buffer */
1588			memcpy(buf, aligned_buf, len);
1589			buf += len;
1590			cnt -= len;
1591		}
1592	} else
1593#endif
1594	{
1595		u32 *pdata = buf;
1596		for (; cnt >= 4; cnt -= 4)
1597			*pdata++ = mci_readl(host, DATA(host->data_offset));
1598		buf = pdata;
1599	}
1600	if (cnt) {
1601		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1602		dw_mci_pull_final_bytes(host, buf, cnt);
1603	}
1604}
1605
1606static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1607{
1608	struct mmc_data *data = host->data;
1609	int init_cnt = cnt;
1610
1611	/* try and push anything in the part_buf */
1612	if (unlikely(host->part_buf_count)) {
1613		int len = dw_mci_push_part_bytes(host, buf, cnt);
1614		buf += len;
1615		cnt -= len;
1616
1617		if (host->part_buf_count == 8) {
1618			mci_writeq(host, DATA(host->data_offset),
1619					host->part_buf);
1620			host->part_buf_count = 0;
1621		}
1622	}
1623#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1624	if (unlikely((unsigned long)buf & 0x7)) {
1625		while (cnt >= 8) {
1626			u64 aligned_buf[16];
1627			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1628			int items = len >> 3;
1629			int i;
1630			/* memcpy from input buffer into aligned buffer */
1631			memcpy(aligned_buf, buf, len);
1632			buf += len;
1633			cnt -= len;
1634			/* push data from aligned buffer into fifo */
1635			for (i = 0; i < items; ++i)
1636				mci_writeq(host, DATA(host->data_offset),
1637						aligned_buf[i]);
1638		}
1639	} else
1640#endif
1641	{
1642		u64 *pdata = buf;
1643		for (; cnt >= 8; cnt -= 8)
1644			mci_writeq(host, DATA(host->data_offset), *pdata++);
1645		buf = pdata;
1646	}
1647	/* put anything remaining in the part_buf */
1648	if (cnt) {
1649		dw_mci_set_part_bytes(host, buf, cnt);
1650		/* Push data if we have reached the expected data length */
1651		if ((data->bytes_xfered + init_cnt) ==
1652		    (data->blksz * data->blocks))
1653			mci_writeq(host, DATA(host->data_offset),
1654				   host->part_buf);
1655	}
1656}
1657
1658static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1659{
1660#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1661	if (unlikely((unsigned long)buf & 0x7)) {
1662		while (cnt >= 8) {
1663			/* pull data from fifo into aligned buffer */
1664			u64 aligned_buf[16];
1665			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1666			int items = len >> 3;
1667			int i;
1668			for (i = 0; i < items; ++i)
1669				aligned_buf[i] = mci_readq(host,
1670						DATA(host->data_offset));
1671			/* memcpy from aligned buffer into output buffer */
1672			memcpy(buf, aligned_buf, len);
1673			buf += len;
1674			cnt -= len;
1675		}
1676	} else
1677#endif
1678	{
1679		u64 *pdata = buf;
1680		for (; cnt >= 8; cnt -= 8)
1681			*pdata++ = mci_readq(host, DATA(host->data_offset));
1682		buf = pdata;
1683	}
1684	if (cnt) {
1685		host->part_buf = mci_readq(host, DATA(host->data_offset));
1686		dw_mci_pull_final_bytes(host, buf, cnt);
1687	}
1688}
1689
1690static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1691{
1692	int len;
1693
1694	/* get remaining partial bytes */
1695	len = dw_mci_pull_part_bytes(host, buf, cnt);
1696	if (unlikely(len == cnt))
1697		return;
1698	buf += len;
1699	cnt -= len;
1700
1701	/* get the rest of the data */
1702	host->pull_data(host, buf, cnt);
1703}
1704
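/*
 * Drain the RX FIFO into the current scatterlist entry.  fcnt is the
 * number of bytes available: the FIFO word count scaled by data_shift,
 * plus whatever is staged in part_buf.
 */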
1705static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1706{
1707	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1708	void *buf;
1709	unsigned int offset;
1710	struct mmc_data	*data = host->data;
1711	int shift = host->data_shift;
1712	u32 status;
1713	unsigned int len;
1714	unsigned int remain, fcnt;
1715
1716	do {
1717		if (!sg_miter_next(sg_miter))
1718			goto done;
1719
1720		host->sg = sg_miter->piter.sg;
1721		buf = sg_miter->addr;
1722		remain = sg_miter->length;
1723		offset = 0;
1724
1725		do {
1726			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1727					<< shift) + host->part_buf_count;
1728			len = min(remain, fcnt);
1729			if (!len)
1730				break;
1731			dw_mci_pull_data(host, (void *)(buf + offset), len);
1732			data->bytes_xfered += len;
1733			offset += len;
1734			remain -= len;
1735		} while (remain);
1736
1737		sg_miter->consumed = offset;
1738		status = mci_readl(host, MINTSTS);
1739		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
 1740	/* if the RXDR is ready, read again */
1741	} while ((status & SDMMC_INT_RXDR) ||
1742		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1743
1744	if (!remain) {
1745		if (!sg_miter_next(sg_miter))
1746			goto done;
1747		sg_miter->consumed = 0;
1748	}
1749	sg_miter_stop(sg_miter);
1750	return;
1751
1752done:
1753	sg_miter_stop(sg_miter);
1754	host->sg = NULL;
1755	smp_wmb();
1756	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1757}
1758
1759static void dw_mci_write_data_pio(struct dw_mci *host)
1760{
1761	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1762	void *buf;
1763	unsigned int offset;
1764	struct mmc_data	*data = host->data;
1765	int shift = host->data_shift;
1766	u32 status;
1767	unsigned int len;
1768	unsigned int fifo_depth = host->fifo_depth;
1769	unsigned int remain, fcnt;
1770
1771	do {
1772		if (!sg_miter_next(sg_miter))
1773			goto done;
1774
1775		host->sg = sg_miter->piter.sg;
1776		buf = sg_miter->addr;
1777		remain = sg_miter->length;
1778		offset = 0;
1779
1780		do {
1781			fcnt = ((fifo_depth -
1782				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1783					<< shift) - host->part_buf_count;
1784			len = min(remain, fcnt);
1785			if (!len)
1786				break;
1787			host->push_data(host, (void *)(buf + offset), len);
1788			data->bytes_xfered += len;
1789			offset += len;
1790			remain -= len;
1791		} while (remain);
1792
1793		sg_miter->consumed = offset;
1794		status = mci_readl(host, MINTSTS);
1795		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1796	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1797
1798	if (!remain) {
1799		if (!sg_miter_next(sg_miter))
1800			goto done;
1801		sg_miter->consumed = 0;
1802	}
1803	sg_miter_stop(sg_miter);
1804	return;
1805
1806done:
1807	sg_miter_stop(sg_miter);
1808	host->sg = NULL;
1809	smp_wmb();
1810	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1811}
1812
1813static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1814{
1815	if (!host->cmd_status)
1816		host->cmd_status = status;
1817
1818	smp_wmb();
1819
1820	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1821	tasklet_schedule(&host->tasklet);
1822}
1823
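/*
 * Top-level interrupt handler: each source is acked in RINTSTS, PIO
 * FIFO work is done inline, and command/data completion is deferred
 * to dw_mci_tasklet_func() through host->pending_events.
 */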
1824static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1825{
1826	struct dw_mci *host = dev_id;
1827	u32 pending;
1828	int i;
1829
1830	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1831
1832	/*
1833	 * DTO fix - version 2.10a and below, and only if internal DMA
1834	 * is configured.
1835	 */
1836	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1837		if (!pending &&
1838		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1839			pending |= SDMMC_INT_DATA_OVER;
1840	}
1841
1842	if (pending) {
1843		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1844			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1845			host->cmd_status = pending;
1846			smp_wmb();
1847			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1848		}
1849
1850		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1851			/* if there is an error report DATA_ERROR */
1852			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1853			host->data_status = pending;
1854			smp_wmb();
1855			set_bit(EVENT_DATA_ERROR, &host->pending_events);
1856			tasklet_schedule(&host->tasklet);
1857		}
1858
1859		if (pending & SDMMC_INT_DATA_OVER) {
1860			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1861			if (!host->data_status)
1862				host->data_status = pending;
1863			smp_wmb();
1864			if (host->dir_status == DW_MCI_RECV_STATUS) {
1865				if (host->sg != NULL)
1866					dw_mci_read_data_pio(host, true);
1867			}
1868			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1869			tasklet_schedule(&host->tasklet);
1870		}
1871
1872		if (pending & SDMMC_INT_RXDR) {
1873			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1874			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1875				dw_mci_read_data_pio(host, false);
1876		}
1877
1878		if (pending & SDMMC_INT_TXDR) {
1879			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1880			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1881				dw_mci_write_data_pio(host);
1882		}
1883
1884		if (pending & SDMMC_INT_CMD_DONE) {
1885			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1886			dw_mci_cmd_interrupt(host, pending);
1887		}
1888
1889		if (pending & SDMMC_INT_CD) {
1890			mci_writel(host, RINTSTS, SDMMC_INT_CD);
1891			queue_work(host->card_workqueue, &host->card_work);
1892		}
1893
1894		/* Handle SDIO Interrupts */
1895		for (i = 0; i < host->num_slots; i++) {
1896			struct dw_mci_slot *slot = host->slot[i];
1897			if (pending & SDMMC_INT_SDIO(i)) {
1898				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1899				mmc_signal_sdio_irq(slot->mmc);
1900			}
1901		}
1902
1903	}
1904
1905#ifdef CONFIG_MMC_DW_IDMAC
1906	/* Handle DMA interrupts */
1907	pending = mci_readl(host, IDSTS);
1908	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1909		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1910		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1911		host->dma_ops->complete(host);
1912	}
1913#endif
1914
1915	return IRQ_HANDLED;
1916}
1917
1918static void dw_mci_work_routine_card(struct work_struct *work)
1919{
1920	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1921	int i;
1922
1923	for (i = 0; i < host->num_slots; i++) {
1924		struct dw_mci_slot *slot = host->slot[i];
1925		struct mmc_host *mmc = slot->mmc;
1926		struct mmc_request *mrq;
1927		int present;
1928
1929		present = dw_mci_get_cd(mmc);
1930		while (present != slot->last_detect_state) {
1931			dev_dbg(&slot->mmc->class_dev, "card %s\n",
1932				present ? "inserted" : "removed");
1933
1934			spin_lock_bh(&host->lock);
1935
1936			/* Card change detected */
1937			slot->last_detect_state = present;
1938
1939			/* Clean up queue if present */
1940			mrq = slot->mrq;
1941			if (mrq) {
1942				if (mrq == host->mrq) {
1943					host->data = NULL;
1944					host->cmd = NULL;
1945
1946					switch (host->state) {
1947					case STATE_IDLE:
1948						break;
1949					case STATE_SENDING_CMD:
1950						mrq->cmd->error = -ENOMEDIUM;
1951						if (!mrq->data)
1952							break;
1953						/* fall through */
1954					case STATE_SENDING_DATA:
1955						mrq->data->error = -ENOMEDIUM;
1956						dw_mci_stop_dma(host);
1957						break;
1958					case STATE_DATA_BUSY:
1959					case STATE_DATA_ERROR:
1960						if (mrq->data->error == -EINPROGRESS)
1961							mrq->data->error = -ENOMEDIUM;
1962						/* fall through */
1963					case STATE_SENDING_STOP:
1964						if (mrq->stop)
1965							mrq->stop->error = -ENOMEDIUM;
1966						break;
1967					}
1968
1969					dw_mci_request_end(host, mrq);
1970				} else {
1971					list_del(&slot->queue_node);
1972					mrq->cmd->error = -ENOMEDIUM;
1973					if (mrq->data)
1974						mrq->data->error = -ENOMEDIUM;
1975					if (mrq->stop)
1976						mrq->stop->error = -ENOMEDIUM;
1977
1978					spin_unlock(&host->lock);
1979					mmc_request_done(slot->mmc, mrq);
1980					spin_lock(&host->lock);
1981				}
1982			}
1983
1984			/* Power down slot */
1985			if (present == 0) {
1986				/* Clear down the FIFO */
1987				dw_mci_fifo_reset(host);
1988#ifdef CONFIG_MMC_DW_IDMAC
1989				dw_mci_idmac_reset(host);
1990#endif
1991
1992			}
1993
1994			spin_unlock_bh(&host->lock);
1995
1996			present = dw_mci_get_cd(mmc);
1997		}
1998
1999		mmc_detect_change(slot->mmc,
2000			msecs_to_jiffies(host->pdata->detect_delay_ms));
2001	}
2002}
2003
2004#ifdef CONFIG_OF
2005/* given a slot id, find out the device node representing that slot */
2006static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2007{
2008	struct device_node *np;
2009	const __be32 *addr;
2010	int len;
2011
2012	if (!dev || !dev->of_node)
2013		return NULL;
2014
2015	for_each_child_of_node(dev->of_node, np) {
2016		addr = of_get_property(np, "reg", &len);
2017		if (!addr || (len < sizeof(int)))
2018			continue;
2019		if (be32_to_cpup(addr) == slot)
2020			return np;
2021	}
2022	return NULL;
2023}
2024
2025static struct dw_mci_of_slot_quirks {
2026	char *quirk;
2027	int id;
2028} of_slot_quirks[] = {
2029	{
2030		.quirk	= "disable-wp",
2031		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2032	},
2033};
2034
2035static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2036{
2037	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2038	int quirks = 0;
2039	int idx;
2040
2041	/* get quirks */
2042	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2043		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2044			quirks |= of_slot_quirks[idx].id;
2045
2046	return quirks;
2047}
2048
2049/* find out bus-width for a given slot */
2050static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2051{
2052	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2053	u32 bus_wd = 1;
2054
2055	if (!np)
2056		return 1;
2057
2058	if (of_property_read_u32(np, "bus-width", &bus_wd))
2059		dev_err(dev, "bus-width property not found, assuming a bus width of 1\n");
2061	return bus_wd;
2062}
2063
2064/* find the write protect gpio for a given slot; or -1 if none specified */
2065static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2066{
2067	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2068	int gpio;
2069
2070	if (!np)
2071		return -EINVAL;
2072
2073	gpio = of_get_named_gpio(np, "wp-gpios", 0);
2074
2075	/* Having a missing entry is valid; return silently */
2076	if (!gpio_is_valid(gpio))
2077		return -EINVAL;
2078
2079	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2080		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2081		return -EINVAL;
2082	}
2083
2084	return gpio;
2085}
2086
2087/* find the cd gpio for a given slot */
2088static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2089					struct mmc_host *mmc)
2090{
2091	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2092	int gpio;
2093
2094	if (!np)
2095		return;
2096
2097	gpio = of_get_named_gpio(np, "cd-gpios", 0);
2098
2099	/* Having a missing entry is valid; return silently */
2100	if (!gpio_is_valid(gpio))
2101		return;
2102
2103	if (mmc_gpio_request_cd(mmc, gpio, 0))
2104		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2105}
2106#else /* CONFIG_OF */
2107static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2108{
2109	return 0;
2110}
2111static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2112{
2113	return 1;
2114}
2115static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2116{
2117	return NULL;
2118}
2119static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2120{
2121	return -EINVAL;
2122}
2123static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2124					struct mmc_host *mmc)
2125{
2126	return;
2127}
2128#endif /* CONFIG_OF */
2129
2130static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2131{
2132	struct mmc_host *mmc;
2133	struct dw_mci_slot *slot;
2134	const struct dw_mci_drv_data *drv_data = host->drv_data;
2135	int ctrl_id, ret;
2136	u32 freq[2];
2137	u8 bus_width;
2138
2139	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2140	if (!mmc)
2141		return -ENOMEM;
2142
2143	slot = mmc_priv(mmc);
2144	slot->id = id;
2145	slot->mmc = mmc;
2146	slot->host = host;
2147	host->slot[id] = slot;
2148
2149	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2150
2151	mmc->ops = &dw_mci_ops;
2152	if (of_property_read_u32_array(host->dev->of_node,
2153				       "clock-freq-min-max", freq, 2)) {
2154		mmc->f_min = DW_MCI_FREQ_MIN;
2155		mmc->f_max = DW_MCI_FREQ_MAX;
2156	} else {
2157		mmc->f_min = freq[0];
2158		mmc->f_max = freq[1];
2159	}
2160
2161	if (host->pdata->get_ocr)
2162		mmc->ocr_avail = host->pdata->get_ocr(id);
2163	else
2164		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2165
2166	/*
2167	 * Start with slot power disabled; it will be enabled when a card
2168	 * is detected.
2169	 */
2170	if (host->pdata->setpower)
2171		host->pdata->setpower(id, 0);
2172
2173	if (host->pdata->caps)
2174		mmc->caps = host->pdata->caps;
2175
2176	if (host->pdata->pm_caps)
2177		mmc->pm_caps = host->pdata->pm_caps;
2178
2179	if (host->dev->of_node) {
2180		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2181		if (ctrl_id < 0)
2182			ctrl_id = 0;
2183	} else {
2184		ctrl_id = to_platform_device(host->dev)->id;
2185	}
2186	if (drv_data && drv_data->caps)
2187		mmc->caps |= drv_data->caps[ctrl_id];
2188
2189	if (host->pdata->caps2)
2190		mmc->caps2 = host->pdata->caps2;
2191
2192	if (host->pdata->get_bus_wd)
2193		bus_width = host->pdata->get_bus_wd(slot->id);
2194	else if (host->dev->of_node)
2195		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2196	else
2197		bus_width = 1;
2198
2199	switch (bus_width) {
2200	case 8:
2201		mmc->caps |= MMC_CAP_8_BIT_DATA;
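		/* fall through - an 8-bit host also supports 4-bit transfers */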
2202	case 4:
2203		mmc->caps |= MMC_CAP_4_BIT_DATA;
2204	}
2205
2206	if (host->pdata->blk_settings) {
2207		mmc->max_segs = host->pdata->blk_settings->max_segs;
2208		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2209		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2210		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2211		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2212	} else {
2213		/* Useful defaults if platform data is unset. */
2214#ifdef CONFIG_MMC_DW_IDMAC
2215		mmc->max_segs = host->ring_size;
2216		mmc->max_blk_size = 65536;
2217		mmc->max_blk_count = host->ring_size;
2218		mmc->max_seg_size = 0x1000;
2219		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2220#else
2221		mmc->max_segs = 64;
2222		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2223		mmc->max_blk_count = 512;
2224		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2225		mmc->max_seg_size = mmc->max_req_size;
2226#endif /* CONFIG_MMC_DW_IDMAC */
2227	}
2228
2229	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2230	dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
2231
2232	ret = mmc_add_host(mmc);
2233	if (ret)
2234		goto err_setup_bus;
2235
2236#if defined(CONFIG_DEBUG_FS)
2237	dw_mci_init_debugfs(slot);
2238#endif
2239
2240	/* Card initially undetected */
2241	slot->last_detect_state = 0;
2242
2243	return 0;
2244
2245err_setup_bus:
2246	mmc_free_host(mmc);
2247	return -EINVAL;
2248}
2249
2250static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2251{
2252	/* Shutdown detect IRQ */
2253	if (slot->host->pdata->exit)
2254		slot->host->pdata->exit(id);
2255
2256	/* Debugfs stuff is cleaned up by mmc core */
2257	mmc_remove_host(slot->mmc);
2258	slot->host->slot[id] = NULL;
2259	mmc_free_host(slot->mmc);
2260}
2261
2262static void dw_mci_init_dma(struct dw_mci *host)
2263{
2264	/* Alloc memory for sg translation */
2265	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2266					  &host->sg_dma, GFP_KERNEL);
2267	if (!host->sg_cpu) {
2268		dev_err(host->dev, "%s: could not alloc DMA memory\n",
2269			__func__);
2270		goto no_dma;
2271	}
2272
2273	/* Determine which DMA interface to use */
2274#ifdef CONFIG_MMC_DW_IDMAC
2275	host->dma_ops = &dw_mci_idmac_ops;
2276	dev_info(host->dev, "Using internal DMA controller.\n");
2277#endif
2278
2279	if (!host->dma_ops)
2280		goto no_dma;
2281
2282	if (host->dma_ops->init && host->dma_ops->start &&
2283	    host->dma_ops->stop && host->dma_ops->cleanup) {
2284		if (host->dma_ops->init(host)) {
2285			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
2286				__func__);
2287			goto no_dma;
2288		}
2289	} else {
2290		dev_err(host->dev, "DMA initialization not found.\n");
2291		goto no_dma;
2292	}
2293
2294	host->use_dma = 1;
2295	return;
2296
2297no_dma:
2298	dev_info(host->dev, "Using PIO mode.\n");
2299	host->use_dma = 0;
2300	return;
2301}
2302
2303static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2304{
2305	unsigned long timeout = jiffies + msecs_to_jiffies(500);
2306	u32 ctrl;
2307
2308	ctrl = mci_readl(host, CTRL);
2309	ctrl |= reset;
2310	mci_writel(host, CTRL, ctrl);
2311
2312	/* wait till resets clear */
2313	do {
2314		ctrl = mci_readl(host, CTRL);
2315		if (!(ctrl & reset))
2316			return true;
2317	} while (time_before(jiffies, timeout));
2318
2319	dev_err(host->dev,
2320		"Timeout resetting block (ctrl reset %#x)\n",
2321		ctrl & reset);
2322
2323	return false;
2324}
2325
2326static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2327{
2328	/*
2329	 * Resetting generates a block interrupt, hence setting
2330	 * the scatter-gather pointer to NULL.
2331	 */
2332	if (host->sg) {
2333		sg_miter_stop(&host->sg_miter);
2334		host->sg = NULL;
2335	}
2336
2337	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2338}
2339
2340static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2341{
2342	return dw_mci_ctrl_reset(host,
2343				 SDMMC_CTRL_FIFO_RESET |
2344				 SDMMC_CTRL_RESET |
2345				 SDMMC_CTRL_DMA_RESET);
2346}
2347
2348#ifdef CONFIG_OF
2349static struct dw_mci_of_quirks {
2350	char *quirk;
2351	int id;
2352} of_quirks[] = {
2353	{
2354		.quirk	= "broken-cd",
2355		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2356	},
2357};
2358
2359static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2360{
2361	struct dw_mci_board *pdata;
2362	struct device *dev = host->dev;
2363	struct device_node *np = dev->of_node;
2364	const struct dw_mci_drv_data *drv_data = host->drv_data;
2365	int idx, ret;
2366	u32 clock_frequency;
2367
2368	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2369	if (!pdata) {
2370		dev_err(dev, "could not allocate memory for pdata\n");
2371		return ERR_PTR(-ENOMEM);
2372	}
2373
2374	/* find out number of slots supported */
2375	if (of_property_read_u32(dev->of_node, "num-slots",
2376				&pdata->num_slots)) {
2377		dev_info(dev, "num-slots property not found, assuming 1 slot is available\n");
2379		pdata->num_slots = 1;
2380	}
2381
2382	/* get quirks */
2383	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2384		if (of_get_property(np, of_quirks[idx].quirk, NULL))
2385			pdata->quirks |= of_quirks[idx].id;
2386
2387	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2388		dev_info(dev, "fifo-depth property not found, using value of FIFOTH register as default\n");
2390
2391	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2392
2393	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2394		pdata->bus_hz = clock_frequency;
2395
2396	if (drv_data && drv_data->parse_dt) {
2397		ret = drv_data->parse_dt(host);
2398		if (ret)
2399			return ERR_PTR(ret);
2400	}
2401
2402	if (of_find_property(np, "keep-power-in-suspend", NULL))
2403		pdata->pm_caps |= MMC_PM_KEEP_POWER;
2404
2405	if (of_find_property(np, "enable-sdio-wakeup", NULL))
2406		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2407
2408	if (of_find_property(np, "supports-highspeed", NULL))
2409		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2410
2411	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2412		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2413
2414	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2415		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2416
2417	if (of_get_property(np, "cd-inverted", NULL))
2418		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
2419
2420	return pdata;
2421}
2422
2423#else /* CONFIG_OF */
2424static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2425{
2426	return ERR_PTR(-EINVAL);
2427}
2428#endif /* CONFIG_OF */
2429
2430int dw_mci_probe(struct dw_mci *host)
2431{
2432	const struct dw_mci_drv_data *drv_data = host->drv_data;
2433	int width, i, ret = 0;
2434	u32 fifo_size;
2435	int init_slots = 0;
2436
2437	if (!host->pdata) {
2438		host->pdata = dw_mci_parse_dt(host);
2439		if (IS_ERR(host->pdata)) {
2440			dev_err(host->dev, "platform data not available\n");
2441			return -EINVAL;
2442		}
2443	}
2444
2445	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2446		dev_err(host->dev,
2447			"Platform data must supply select_slot function\n");
2448		return -ENODEV;
2449	}
2450
2451	host->biu_clk = devm_clk_get(host->dev, "biu");
2452	if (IS_ERR(host->biu_clk)) {
2453		dev_dbg(host->dev, "biu clock not available\n");
2454	} else {
2455		ret = clk_prepare_enable(host->biu_clk);
2456		if (ret) {
2457			dev_err(host->dev, "failed to enable biu clock\n");
2458			return ret;
2459		}
2460	}
2461
2462	host->ciu_clk = devm_clk_get(host->dev, "ciu");
2463	if (IS_ERR(host->ciu_clk)) {
2464		dev_dbg(host->dev, "ciu clock not available\n");
2465		host->bus_hz = host->pdata->bus_hz;
2466	} else {
2467		ret = clk_prepare_enable(host->ciu_clk);
2468		if (ret) {
2469			dev_err(host->dev, "failed to enable ciu clock\n");
2470			goto err_clk_biu;
2471		}
2472
2473		if (host->pdata->bus_hz) {
2474			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2475			if (ret)
2476				dev_warn(host->dev,
2477					 "Unable to set bus rate to %uHz\n",
2478					 host->pdata->bus_hz);
2479		}
2480		host->bus_hz = clk_get_rate(host->ciu_clk);
2481	}
2482
2483	if (drv_data && drv_data->init) {
2484		ret = drv_data->init(host);
2485		if (ret) {
2486			dev_err(host->dev,
2487				"implementation specific init failed\n");
2488			goto err_clk_ciu;
2489		}
2490	}
2491
2492	if (drv_data && drv_data->setup_clock) {
2493		ret = drv_data->setup_clock(host);
2494		if (ret) {
2495			dev_err(host->dev,
2496				"implementation specific clock setup failed\n");
2497			goto err_clk_ciu;
2498		}
2499	}
2500
2501	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2502	if (IS_ERR(host->vmmc)) {
2503		ret = PTR_ERR(host->vmmc);
2504		if (ret == -EPROBE_DEFER)
2505			goto err_clk_ciu;
2506
2507		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2508		host->vmmc = NULL;
2509	} else {
2510		ret = regulator_enable(host->vmmc);
2511		if (ret) {
2512			if (ret != -EPROBE_DEFER)
2513				dev_err(host->dev,
2514					"regulator_enable fail: %d\n", ret);
2515			goto err_clk_ciu;
2516		}
2517	}
2518
2519	if (!host->bus_hz) {
2520		dev_err(host->dev,
2521			"Platform data must supply bus speed\n");
2522		ret = -ENODEV;
2523		goto err_regulator;
2524	}
2525
2526	host->quirks = host->pdata->quirks;
2527
2528	spin_lock_init(&host->lock);
2529	INIT_LIST_HEAD(&host->queue);
2530
2531	/*
2532	 * Get the host data width - this assumes that HCON has been set with
2533	 * the correct values.
2534	 */
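	/* HCON[9:7] encodes the width: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit; anything else is reserved. */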
2535	i = (mci_readl(host, HCON) >> 7) & 0x7;
2536	if (!i) {
2537		host->push_data = dw_mci_push_data16;
2538		host->pull_data = dw_mci_pull_data16;
2539		width = 16;
2540		host->data_shift = 1;
2541	} else if (i == 2) {
2542		host->push_data = dw_mci_push_data64;
2543		host->pull_data = dw_mci_pull_data64;
2544		width = 64;
2545		host->data_shift = 3;
2546	} else {
2547		/* Check for a reserved value, and warn if it is */
2548		WARN((i != 1),
2549		     "HCON reports a reserved host data width!\n"
2550		     "Defaulting to 32-bit access.\n");
2551		host->push_data = dw_mci_push_data32;
2552		host->pull_data = dw_mci_pull_data32;
2553		width = 32;
2554		host->data_shift = 2;
2555	}
2556
2557	/* Reset all blocks */
2558	if (!dw_mci_ctrl_all_reset(host))
2559		return -ENODEV;
2560
2561	host->dma_ops = host->pdata->dma_ops;
2562	dw_mci_init_dma(host);
2563
2564	/* Clear the interrupts for the host controller */
2565	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2566	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2567
2568	/* Put in max timeout */
2569	mci_writel(host, TMOUT, 0xFFFFFFFF);
2570
2571	/*
2572	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
2573	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
2574	 */
2575	if (!host->pdata->fifo_depth) {
2576		/*
2577		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2578		 * have been overwritten by the bootloader, just like we're
2579		 * about to do, so if you know the value for your hardware, you
2580		 * should put it in the platform data.
2581		 */
2582		fifo_size = mci_readl(host, FIFOTH);
2583		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2584	} else {
2585		fifo_size = host->pdata->fifo_depth;
2586	}
2587	host->fifo_depth = fifo_size;
2588	host->fifoth_val =
2589		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2590	mci_writel(host, FIFOTH, host->fifoth_val);
2591
2592	/* disable clock to CIU */
2593	mci_writel(host, CLKENA, 0);
2594	mci_writel(host, CLKSRC, 0);
2595
2596	/*
2597	 * The DATA register offset changed in the 2.40a spec, so check the
2598	 * version ID and set the data offset accordingly.
2599	 */
2600	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2601	dev_info(host->dev, "Version ID is %04x\n", host->verid);
2602
2603	if (host->verid < DW_MMC_240A)
2604		host->data_offset = DATA_OFFSET;
2605	else
2606		host->data_offset = DATA_240A_OFFSET;
2607
2608	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2609	host->card_workqueue = alloc_workqueue("dw-mci-card",
2610			WQ_MEM_RECLAIM, 1);
2611	if (!host->card_workqueue) {
2612		ret = -ENOMEM;
2613		goto err_dmaunmap;
2614	}
2615	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2616	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2617			       host->irq_flags, "dw-mci", host);
2618	if (ret)
2619		goto err_workqueue;
2620
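	/* Prefer the platform-supplied slot count; HCON[5:1] stores the count minus one. */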
2621	if (host->pdata->num_slots)
2622		host->num_slots = host->pdata->num_slots;
2623	else
2624		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2625
2626	/*
2627	 * Enable interrupts for command done, data over, data empty, card
2628	 * detect, receive ready, and errors such as transmit/receive timeout and CRC error.
2629	 */
2630	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2631	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2632		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2633		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2634	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2635
2636	dev_info(host->dev,
2637		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
2638		 host->irq, width, fifo_size);
2640
2641	/* We need at least one slot to succeed */
2642	for (i = 0; i < host->num_slots; i++) {
2643		ret = dw_mci_init_slot(host, i);
2644		if (ret)
2645			dev_dbg(host->dev, "slot %d init failed\n", i);
2646		else
2647			init_slots++;
2648	}
2649
2650	if (init_slots) {
2651		dev_info(host->dev, "%d slots initialized\n", init_slots);
2652	} else {
2653		dev_dbg(host->dev, "attempted to initialize %d slots, but failed on all\n",
2654			host->num_slots);
2655		goto err_workqueue;
2656	}
2657
2658	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2659		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2660
2661	return 0;
2662
2663err_workqueue:
2664	destroy_workqueue(host->card_workqueue);
2665
2666err_dmaunmap:
2667	if (host->use_dma && host->dma_ops->exit)
2668		host->dma_ops->exit(host);
2669
2670err_regulator:
2671	if (host->vmmc)
2672		regulator_disable(host->vmmc);
2673
2674err_clk_ciu:
2675	if (!IS_ERR(host->ciu_clk))
2676		clk_disable_unprepare(host->ciu_clk);
2677
2678err_clk_biu:
2679	if (!IS_ERR(host->biu_clk))
2680		clk_disable_unprepare(host->biu_clk);
2681
2682	return ret;
2683}
2684EXPORT_SYMBOL(dw_mci_probe);
2685
2686void dw_mci_remove(struct dw_mci *host)
2687{
2688	int i;
2689
2690	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2691	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2692
2693	for (i = 0; i < host->num_slots; i++) {
2694		dev_dbg(host->dev, "remove slot %d\n", i);
2695		if (host->slot[i])
2696			dw_mci_cleanup_slot(host->slot[i], i);
2697	}
2698
2699	/* disable clock to CIU */
2700	mci_writel(host, CLKENA, 0);
2701	mci_writel(host, CLKSRC, 0);
2702
2703	destroy_workqueue(host->card_workqueue);
2704
2705	if (host->use_dma && host->dma_ops->exit)
2706		host->dma_ops->exit(host);
2707
2708	if (host->vmmc)
2709		regulator_disable(host->vmmc);
2710
2711	if (!IS_ERR(host->ciu_clk))
2712		clk_disable_unprepare(host->ciu_clk);
2713
2714	if (!IS_ERR(host->biu_clk))
2715		clk_disable_unprepare(host->biu_clk);
2716}
2717EXPORT_SYMBOL(dw_mci_remove);
2718
2719
2720
2721#ifdef CONFIG_PM_SLEEP
2722/*
2723 * TODO: we should probably disable the clock to the card in the suspend path.
2724 */
2725int dw_mci_suspend(struct dw_mci *host)
2726{
2727	if (host->vmmc)
2728		regulator_disable(host->vmmc);
2729
2730	return 0;
2731}
2732EXPORT_SYMBOL(dw_mci_suspend);
2733
2734int dw_mci_resume(struct dw_mci *host)
2735{
2736	int i, ret;
2737
2738	if (host->vmmc) {
2739		ret = regulator_enable(host->vmmc);
2740		if (ret) {
2741			dev_err(host->dev,
2742				"failed to enable regulator: %d\n", ret);
2743			return ret;
2744		}
2745	}
2746
2747	if (!dw_mci_ctrl_all_reset(host)) {
2748		ret = -ENODEV;
2749		return ret;
2750	}
2751
2752	if (host->use_dma && host->dma_ops->init)
2753		host->dma_ops->init(host);
2754
2755	/*
2756	 * Restore the initial value of the FIFOTH register
2757	 * and invalidate prev_blksz by clearing it to zero.
2758	 */
2759	mci_writel(host, FIFOTH, host->fifoth_val);
2760	host->prev_blksz = 0;
2761
2762	/* Put in max timeout */
2763	mci_writel(host, TMOUT, 0xFFFFFFFF);
2764
2765	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2766	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2767		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2768		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2769	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2770
2771	for (i = 0; i < host->num_slots; i++) {
2772		struct dw_mci_slot *slot = host->slot[i];
2773		if (!slot)
2774			continue;
2775		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2776			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2777			dw_mci_setup_bus(slot, true);
2778		}
2779	}
2780	return 0;
2781}
2782EXPORT_SYMBOL(dw_mci_resume);
2783#endif /* CONFIG_PM_SLEEP */
2784
2785static int __init dw_mci_init(void)
2786{
2787	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
2788	return 0;
2789}
2790
2791static void __exit dw_mci_exit(void)
2792{
2793}
2794
2795module_init(dw_mci_init);
2796module_exit(dw_mci_exit);
2797
2798MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2799MODULE_AUTHOR("NXP Semiconductor VietNam");
2800MODULE_AUTHOR("Imagination Technologies Ltd");
2801MODULE_LICENSE("GPL v2");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Synopsys DesignWare Multimedia Card Interface driver
   4 *  (Based on NXP driver for lpc 31xx)
   5 *
   6 * Copyright (C) 2009 NXP Semiconductors
   7 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
   8 */
   9
  10#include <linux/blkdev.h>
  11#include <linux/clk.h>
  12#include <linux/debugfs.h>
  13#include <linux/device.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/err.h>
  16#include <linux/init.h>
  17#include <linux/interrupt.h>
  18#include <linux/iopoll.h>
  19#include <linux/ioport.h>
  20#include <linux/module.h>
  21#include <linux/platform_device.h>
  22#include <linux/pm_runtime.h>
  23#include <linux/seq_file.h>
  24#include <linux/slab.h>
  25#include <linux/stat.h>
  26#include <linux/delay.h>
  27#include <linux/irq.h>
  28#include <linux/mmc/card.h>
  29#include <linux/mmc/host.h>
  30#include <linux/mmc/mmc.h>
  31#include <linux/mmc/sd.h>
  32#include <linux/mmc/sdio.h>
  33#include <linux/bitops.h>
  34#include <linux/regulator/consumer.h>
  35#include <linux/of.h>
  36#include <linux/of_gpio.h>
  37#include <linux/mmc/slot-gpio.h>
  38
  39#include "dw_mmc.h"
  40
  41/* Common flag combinations */
  42#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
  43				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
  44				 SDMMC_INT_EBE | SDMMC_INT_HLE)
  45#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
  46				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
  47#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
  48				 DW_MCI_CMD_ERROR_FLAGS)
  49#define DW_MCI_SEND_STATUS	1
  50#define DW_MCI_RECV_STATUS	2
  51#define DW_MCI_DMA_THRESHOLD	16
  52
  53#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
  54#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */
  55
  56#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
  57				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
  58				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
  59				 SDMMC_IDMAC_INT_TI)
  60
  61#define DESC_RING_BUF_SZ	PAGE_SIZE
  62
  63struct idmac_desc_64addr {
  64	u32		des0;	/* Control Descriptor */
  65#define IDMAC_OWN_CLR64(x) \
  66	!((x) & cpu_to_le32(IDMAC_DES0_OWN))
  67
  68	u32		des1;	/* Reserved */
  69
   70	u32		des2;	/* Buffer sizes */
  71#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
  72	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
  73	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
  74
  75	u32		des3;	/* Reserved */
  76
  77	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
  78	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/
  79
  80	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
  81	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
  82};
  83
  84struct idmac_desc {
  85	__le32		des0;	/* Control Descriptor */
  86#define IDMAC_DES0_DIC	BIT(1)
  87#define IDMAC_DES0_LD	BIT(2)
  88#define IDMAC_DES0_FD	BIT(3)
  89#define IDMAC_DES0_CH	BIT(4)
  90#define IDMAC_DES0_ER	BIT(5)
  91#define IDMAC_DES0_CES	BIT(30)
  92#define IDMAC_DES0_OWN	BIT(31)
  93
  94	__le32		des1;	/* Buffer sizes */
  95#define IDMAC_SET_BUFFER1_SIZE(d, s) \
  96	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
  97
  98	__le32		des2;	/* buffer 1 physical address */
  99
 100	__le32		des3;	/* buffer 2 physical address */
 101};
 102
 103/* Each descriptor can transfer up to 4KB of data in chained mode */
 104#define DW_MCI_DESC_DATA_LENGTH	0x1000
 105
 106#if defined(CONFIG_DEBUG_FS)
 107static int dw_mci_req_show(struct seq_file *s, void *v)
 108{
 109	struct dw_mci_slot *slot = s->private;
 110	struct mmc_request *mrq;
 111	struct mmc_command *cmd;
 112	struct mmc_command *stop;
 113	struct mmc_data	*data;
 114
 115	/* Make sure we get a consistent snapshot */
 116	spin_lock_bh(&slot->host->lock);
 117	mrq = slot->mrq;
 118
 119	if (mrq) {
 120		cmd = mrq->cmd;
 121		data = mrq->data;
 122		stop = mrq->stop;
 123
 124		if (cmd)
 125			seq_printf(s,
 126				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 127				   cmd->opcode, cmd->arg, cmd->flags,
 128				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
  129				   cmd->resp[3], cmd->error);
 130		if (data)
 131			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
 132				   data->bytes_xfered, data->blocks,
 133				   data->blksz, data->flags, data->error);
 134		if (stop)
 135			seq_printf(s,
 136				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 137				   stop->opcode, stop->arg, stop->flags,
 138				   stop->resp[0], stop->resp[1], stop->resp[2],
  139				   stop->resp[3], stop->error);
 140	}
 141
 142	spin_unlock_bh(&slot->host->lock);
 143
 144	return 0;
 145}
 146DEFINE_SHOW_ATTRIBUTE(dw_mci_req);
 147
 148static int dw_mci_regs_show(struct seq_file *s, void *v)
 149{
 150	struct dw_mci *host = s->private;
 151
 152	pm_runtime_get_sync(host->dev);
 153
 154	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
 155	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
 156	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
 157	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
 158	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
 159	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
 160
 161	pm_runtime_put_autosuspend(host->dev);
 162
 163	return 0;
 164}
 165DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);
 166
 167static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
 168{
 169	struct mmc_host	*mmc = slot->mmc;
 170	struct dw_mci *host = slot->host;
 171	struct dentry *root;
 172
 173	root = mmc->debugfs_root;
 174	if (!root)
 175		return;
 176
 177	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
 178	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
 179	debugfs_create_u32("state", S_IRUSR, root, &host->state);
 180	debugfs_create_xul("pending_events", S_IRUSR, root,
 181			   &host->pending_events);
 182	debugfs_create_xul("completed_events", S_IRUSR, root,
 183			   &host->completed_events);
 184}
 185#endif /* defined(CONFIG_DEBUG_FS) */
 186
 187static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
 188{
 189	u32 ctrl;
 190
 191	ctrl = mci_readl(host, CTRL);
 192	ctrl |= reset;
 193	mci_writel(host, CTRL, ctrl);
 194
 195	/* wait till resets clear */
 196	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
 197				      !(ctrl & reset),
 198				      1, 500 * USEC_PER_MSEC)) {
 199		dev_err(host->dev,
 200			"Timeout resetting block (ctrl reset %#x)\n",
 201			ctrl & reset);
 202		return false;
 203	}
 204
 205	return true;
 206}
 207
 208static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
 209{
 210	u32 status;
 211
 212	/*
 213	 * Databook says that before issuing a new data transfer command
 214	 * we need to check to see if the card is busy.  Data transfer commands
 215	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
 216	 *
 217	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
 218	 * expected.
 219	 */
 220	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
 221	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
 222		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
 223					      status,
 224					      !(status & SDMMC_STATUS_BUSY),
 225					      10, 500 * USEC_PER_MSEC))
 226			dev_err(host->dev, "Busy; trying anyway\n");
 227	}
 228}
 229
 230static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
 231{
 232	struct dw_mci *host = slot->host;
 233	unsigned int cmd_status = 0;
 234
 235	mci_writel(host, CMDARG, arg);
 236	wmb(); /* drain writebuffer */
 237	dw_mci_wait_while_busy(host, cmd);
 238	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
 239
 240	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
 241				      !(cmd_status & SDMMC_CMD_START),
 242				      1, 500 * USEC_PER_MSEC))
 243		dev_err(&slot->mmc->class_dev,
 244			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
 245			cmd, arg, cmd_status);
 246}
 247
 248static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
 249{
 250	struct dw_mci_slot *slot = mmc_priv(mmc);
 251	struct dw_mci *host = slot->host;
 252	u32 cmdr;
 253
 254	cmd->error = -EINPROGRESS;
 255	cmdr = cmd->opcode;
 256
 257	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
 258	    cmd->opcode == MMC_GO_IDLE_STATE ||
 259	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
 260	    (cmd->opcode == SD_IO_RW_DIRECT &&
 261	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
 262		cmdr |= SDMMC_CMD_STOP;
 263	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
 264		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
 265
 266	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
 267		u32 clk_en_a;
 268
 269		/* Special bit makes CMD11 not die */
 270		cmdr |= SDMMC_CMD_VOLT_SWITCH;
 271
 272		/* Change state to continue to handle CMD11 weirdness */
 273		WARN_ON(slot->host->state != STATE_SENDING_CMD);
 274		slot->host->state = STATE_SENDING_CMD11;
 275
 276		/*
 277		 * We need to disable low power mode (automatic clock stop)
 278		 * while doing voltage switch so we don't confuse the card,
 279		 * since stopping the clock is a specific part of the UHS
 280		 * voltage change dance.
 281		 *
 282		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
 283		 * unconditionally turned back on in dw_mci_setup_bus() if it's
 284		 * ever called with a non-zero clock.  That shouldn't happen
 285		 * until the voltage change is all done.
 286		 */
 287		clk_en_a = mci_readl(host, CLKENA);
 288		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
 289		mci_writel(host, CLKENA, clk_en_a);
 290		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
 291			     SDMMC_CMD_PRV_DAT_WAIT, 0);
 292	}
 293
 294	if (cmd->flags & MMC_RSP_PRESENT) {
 295		/* We expect a response, so set this bit */
 296		cmdr |= SDMMC_CMD_RESP_EXP;
 297		if (cmd->flags & MMC_RSP_136)
 298			cmdr |= SDMMC_CMD_RESP_LONG;
 299	}
 300
 301	if (cmd->flags & MMC_RSP_CRC)
 302		cmdr |= SDMMC_CMD_RESP_CRC;
 303
 304	if (cmd->data) {
 305		cmdr |= SDMMC_CMD_DAT_EXP;
 306		if (cmd->data->flags & MMC_DATA_WRITE)
 307			cmdr |= SDMMC_CMD_DAT_WR;
 308	}
 309
 310	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
 311		cmdr |= SDMMC_CMD_USE_HOLD_REG;
 312
 313	return cmdr;
 314}
 315
 316static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
 317{
 318	struct mmc_command *stop;
 319	u32 cmdr;
 320
 321	if (!cmd->data)
 322		return 0;
 323
 324	stop = &host->stop_abort;
 325	cmdr = cmd->opcode;
 326	memset(stop, 0, sizeof(struct mmc_command));
 327
 328	if (cmdr == MMC_READ_SINGLE_BLOCK ||
 329	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
 330	    cmdr == MMC_WRITE_BLOCK ||
 331	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
 332	    cmdr == MMC_SEND_TUNING_BLOCK ||
 333	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
 334		stop->opcode = MMC_STOP_TRANSMISSION;
 335		stop->arg = 0;
 336		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
 337	} else if (cmdr == SD_IO_RW_EXTENDED) {
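		/*
		 * Abort CMD53 with a CMD52 write to the CCCR abort register,
		 * reusing the function number from bits [30:28] of the
		 * original argument.
		 */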
 338		stop->opcode = SD_IO_RW_DIRECT;
 339		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
 340			     ((cmd->arg >> 28) & 0x7);
 341		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
 342	} else {
 343		return 0;
 344	}
 345
 346	cmdr = stop->opcode | SDMMC_CMD_STOP |
 347		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
 348
 349	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
 350		cmdr |= SDMMC_CMD_USE_HOLD_REG;
 351
 352	return cmdr;
 353}
 354
 355static inline void dw_mci_set_cto(struct dw_mci *host)
 356{
 357	unsigned int cto_clks;
 358	unsigned int cto_div;
 359	unsigned int cto_ms;
 360	unsigned long irqflags;
 361
 362	cto_clks = mci_readl(host, TMOUT) & 0xff;
 363	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
 364	if (cto_div == 0)
 365		cto_div = 1;
 366
 367	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
 368				  host->bus_hz);
 369
  370	/* add a bit of spare time */
 371	cto_ms += 10;
 372
 373	/*
 374	 * The durations we're working with are fairly short so we have to be
 375	 * extra careful about synchronization here.  Specifically in hardware a
 376	 * command timeout is _at most_ 5.1 ms, so that means we expect an
 377	 * interrupt (either command done or timeout) to come rather quickly
 378	 * after the mci_writel.  ...but just in case we have a long interrupt
 379	 * latency let's add a bit of paranoia.
 380	 *
 381	 * In general we'll assume that at least an interrupt will be asserted
 382	 * in hardware by the time the cto_timer runs.  ...and if it hasn't
 383	 * been asserted in hardware by that time then we'll assume it'll never
 384	 * come.
 385	 */
 386	spin_lock_irqsave(&host->irq_lock, irqflags);
 387	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
 388		mod_timer(&host->cto_timer,
 389			jiffies + msecs_to_jiffies(cto_ms) + 1);
 390	spin_unlock_irqrestore(&host->irq_lock, irqflags);
 391}
 392
 393static void dw_mci_start_command(struct dw_mci *host,
 394				 struct mmc_command *cmd, u32 cmd_flags)
 395{
 396	host->cmd = cmd;
 397	dev_vdbg(host->dev,
 398		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
 399		 cmd->arg, cmd_flags);
 400
 401	mci_writel(host, CMDARG, cmd->arg);
 402	wmb(); /* drain writebuffer */
 403	dw_mci_wait_while_busy(host, cmd_flags);
 404
 405	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
 406
 407	/* response expected command only */
 408	if (cmd_flags & SDMMC_CMD_RESP_EXP)
 409		dw_mci_set_cto(host);
 410}
 411
 412static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
 413{
 414	struct mmc_command *stop = &host->stop_abort;
 415
 416	dw_mci_start_command(host, stop, host->stop_cmdr);
 417}
 418
 419/* DMA interface functions */
 420static void dw_mci_stop_dma(struct dw_mci *host)
 421{
 422	if (host->using_dma) {
 423		host->dma_ops->stop(host);
 424		host->dma_ops->cleanup(host);
 425	}
 426
 427	/* Data transfer was stopped by the interrupt handler */
 428	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 429}
 430
 431static void dw_mci_dma_cleanup(struct dw_mci *host)
 432{
 433	struct mmc_data *data = host->data;
 434
 435	if (data && data->host_cookie == COOKIE_MAPPED) {
 436		dma_unmap_sg(host->dev,
 437			     data->sg,
 438			     data->sg_len,
 439			     mmc_get_dma_dir(data));
 440		data->host_cookie = COOKIE_UNMAPPED;
 441	}
 442}
 443
 444static void dw_mci_idmac_reset(struct dw_mci *host)
 445{
 446	u32 bmod = mci_readl(host, BMOD);
 447	/* Software reset of DMA */
 448	bmod |= SDMMC_IDMAC_SWRESET;
 449	mci_writel(host, BMOD, bmod);
 450}
 451
 452static void dw_mci_idmac_stop_dma(struct dw_mci *host)
 453{
 454	u32 temp;
 455
 456	/* Disable and reset the IDMAC interface */
 457	temp = mci_readl(host, CTRL);
 458	temp &= ~SDMMC_CTRL_USE_IDMAC;
 459	temp |= SDMMC_CTRL_DMA_RESET;
 460	mci_writel(host, CTRL, temp);
 461
 462	/* Stop the IDMAC running */
 463	temp = mci_readl(host, BMOD);
 464	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
 465	temp |= SDMMC_IDMAC_SWRESET;
 466	mci_writel(host, BMOD, temp);
 467}
 468
 469static void dw_mci_dmac_complete_dma(void *arg)
 470{
 471	struct dw_mci *host = arg;
 472	struct mmc_data *data = host->data;
 473
 474	dev_vdbg(host->dev, "DMA complete\n");
 475
 476	if ((host->use_dma == TRANS_MODE_EDMAC) &&
 477	    data && (data->flags & MMC_DATA_READ))
 478		/* Invalidate cache after read */
 479		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
 480				    data->sg,
 481				    data->sg_len,
 482				    DMA_FROM_DEVICE);
 483
 484	host->dma_ops->cleanup(host);
 485
 486	/*
 487	 * If the card was removed, data will be NULL. No point in trying to
 488	 * send the stop command or waiting for NBUSY in this case.
 489	 */
 490	if (data) {
 491		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 492		tasklet_schedule(&host->tasklet);
 493	}
 494}
 495
 496static int dw_mci_idmac_init(struct dw_mci *host)
 497{
 498	int i;
 499
 500	if (host->dma_64bit_address == 1) {
 501		struct idmac_desc_64addr *p;
 502		/* Number of descriptors in the ring buffer */
 503		host->ring_size =
 504			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
 505
 506		/* Forward link the descriptor list */
 507		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
 508								i++, p++) {
 509			p->des6 = (host->sg_dma +
 510					(sizeof(struct idmac_desc_64addr) *
 511							(i + 1))) & 0xffffffff;
 512
 513			p->des7 = (u64)(host->sg_dma +
 514					(sizeof(struct idmac_desc_64addr) *
 515							(i + 1))) >> 32;
 516			/* Initialize reserved and buffer size fields to "0" */
 517			p->des0 = 0;
 518			p->des1 = 0;
 519			p->des2 = 0;
 520			p->des3 = 0;
 521		}
 522
 523		/* Set the last descriptor as the end-of-ring descriptor */
 524		p->des6 = host->sg_dma & 0xffffffff;
 525		p->des7 = (u64)host->sg_dma >> 32;
 526		p->des0 = IDMAC_DES0_ER;
 527
 528	} else {
 529		struct idmac_desc *p;
 530		/* Number of descriptors in the ring buffer */
 531		host->ring_size =
 532			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
 533
 534		/* Forward link the descriptor list */
 535		for (i = 0, p = host->sg_cpu;
 536		     i < host->ring_size - 1;
 537		     i++, p++) {
 538			p->des3 = cpu_to_le32(host->sg_dma +
 539					(sizeof(struct idmac_desc) * (i + 1)));
 540			p->des0 = 0;
 541			p->des1 = 0;
 542		}
 543
 544		/* Set the last descriptor as the end-of-ring descriptor */
 545		p->des3 = cpu_to_le32(host->sg_dma);
 546		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
 547	}
 548
 549	dw_mci_idmac_reset(host);
 550
 551	if (host->dma_64bit_address == 1) {
 552		/* Mask out interrupts - get Tx & Rx complete only */
 553		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
 554		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
 555				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
 556
 557		/* Set the descriptor base address */
 558		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
 559		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
 560
 561	} else {
 562		/* Mask out interrupts - get Tx & Rx complete only */
 563		mci_writel(host, IDSTS, IDMAC_INT_CLR);
 564		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
 565				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
 566
 567		/* Set the descriptor base address */
 568		mci_writel(host, DBADDR, host->sg_dma);
 569	}
 570
 571	return 0;
 572}
 573
 574static inline int dw_mci_prepare_desc64(struct dw_mci *host,
 575					 struct mmc_data *data,
 576					 unsigned int sg_len)
 577{
 578	unsigned int desc_len;
 579	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
 580	u32 val;
 581	int i;
 582
 583	desc_first = desc_last = desc = host->sg_cpu;
 584
 585	for (i = 0; i < sg_len; i++) {
 586		unsigned int length = sg_dma_len(&data->sg[i]);
 587
 588		u64 mem_addr = sg_dma_address(&data->sg[i]);
 589
 590		for ( ; length ; desc++) {
 591			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
 592				   length : DW_MCI_DESC_DATA_LENGTH;
 593
 594			length -= desc_len;
 595
 596			/*
 597			 * Wait for the former clear OWN bit operation
 598			 * of IDMAC to make sure that this descriptor
 599			 * isn't still owned by IDMAC as IDMAC's write
 600			 * ops and CPU's read ops are asynchronous.
 601			 */
 602			if (readl_poll_timeout_atomic(&desc->des0, val,
 603						!(val & IDMAC_DES0_OWN),
 604						10, 100 * USEC_PER_MSEC))
 605				goto err_own_bit;
 606
 607			/*
 608			 * Set the OWN bit and disable interrupts
 609			 * for this descriptor
 610			 */
 611			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
 612						IDMAC_DES0_CH;
 613
 614			/* Buffer length */
 615			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
 616
 617			/* Physical address to DMA to/from */
 618			desc->des4 = mem_addr & 0xffffffff;
 619			desc->des5 = mem_addr >> 32;
 620
 621			/* Update physical address for the next desc */
 622			mem_addr += desc_len;
 623
 624			/* Save pointer to the last descriptor */
 625			desc_last = desc;
 626		}
 627	}
 628
 629	/* Set first descriptor */
 630	desc_first->des0 |= IDMAC_DES0_FD;
 631
 632	/* Set last descriptor */
 633	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
 634	desc_last->des0 |= IDMAC_DES0_LD;
 635
 636	return 0;
 637err_own_bit:
 638	/* restore the descriptor chain as it's polluted */
 639	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
 640	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
 641	dw_mci_idmac_init(host);
 642	return -EINVAL;
 643}
 644
 645
 646static inline int dw_mci_prepare_desc32(struct dw_mci *host,
 647					 struct mmc_data *data,
 648					 unsigned int sg_len)
 649{
 650	unsigned int desc_len;
 651	struct idmac_desc *desc_first, *desc_last, *desc;
 652	u32 val;
 653	int i;
 654
 655	desc_first = desc_last = desc = host->sg_cpu;
 656
 657	for (i = 0; i < sg_len; i++) {
 658		unsigned int length = sg_dma_len(&data->sg[i]);
 659
 660		u32 mem_addr = sg_dma_address(&data->sg[i]);
 661
 662		for ( ; length ; desc++) {
 663			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
 664				   length : DW_MCI_DESC_DATA_LENGTH;
 665
 666			length -= desc_len;
 667
 668			/*
 669			 * Wait for the former clear OWN bit operation
 670			 * of IDMAC to make sure that this descriptor
 671			 * isn't still owned by IDMAC as IDMAC's write
 672			 * ops and CPU's read ops are asynchronous.
 673			 */
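			/*
			 * Despite the "64" in its name, IDMAC_OWN_CLR64()
			 * only tests the le32 OWN bit, so it applies to
			 * 32-bit descriptors as well.
			 */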
 674			if (readl_poll_timeout_atomic(&desc->des0, val,
 675						      IDMAC_OWN_CLR64(val),
 676						      10,
 677						      100 * USEC_PER_MSEC))
 678				goto err_own_bit;
 679
 680			/*
 681			 * Set the OWN bit and disable interrupts
 682			 * for this descriptor
 683			 */
 684			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
 685						 IDMAC_DES0_DIC |
 686						 IDMAC_DES0_CH);
 687
 688			/* Buffer length */
 689			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
 690
 691			/* Physical address to DMA to/from */
 692			desc->des2 = cpu_to_le32(mem_addr);
 693
 694			/* Update physical address for the next desc */
 695			mem_addr += desc_len;
 696
 697			/* Save pointer to the last descriptor */
 698			desc_last = desc;
 699		}
 700	}
 701
 702	/* Set first descriptor */
 703	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
 704
 705	/* Set last descriptor */
 706	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
 707				       IDMAC_DES0_DIC));
 708	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
 709
 710	return 0;
 711err_own_bit:
 712	/* restore the descriptor chain as it's polluted */
 713	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
 714	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
 715	dw_mci_idmac_init(host);
 716	return -EINVAL;
 717}
 718
 719static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 720{
 721	u32 temp;
 722	int ret;
 723
 724	if (host->dma_64bit_address == 1)
 725		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
 726	else
 727		ret = dw_mci_prepare_desc32(host, host->data, sg_len);
 728
 729	if (ret)
 730		goto out;
 731
 732	/* drain writebuffer */
 733	wmb();
 734
 735	/* Make sure to reset DMA in case we did PIO before this */
 736	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
 737	dw_mci_idmac_reset(host);
 738
 739	/* Select IDMAC interface */
 740	temp = mci_readl(host, CTRL);
 741	temp |= SDMMC_CTRL_USE_IDMAC;
 742	mci_writel(host, CTRL, temp);
 743
 744	/* drain writebuffer */
 745	wmb();
 746
 747	/* Enable the IDMAC */
 748	temp = mci_readl(host, BMOD);
 749	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
 750	mci_writel(host, BMOD, temp);
 751
 752	/* Start it running */
 753	mci_writel(host, PLDMND, 1);
 754
 755out:
 756	return ret;
 757}
 758
 759static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
 760	.init = dw_mci_idmac_init,
 761	.start = dw_mci_idmac_start_dma,
 762	.stop = dw_mci_idmac_stop_dma,
 763	.complete = dw_mci_dmac_complete_dma,
 764	.cleanup = dw_mci_dma_cleanup,
 765};
 766
 767static void dw_mci_edmac_stop_dma(struct dw_mci *host)
 768{
 769	dmaengine_terminate_async(host->dms->ch);
 770}
 771
 772static int dw_mci_edmac_start_dma(struct dw_mci *host,
 773					    unsigned int sg_len)
 774{
 775	struct dma_slave_config cfg;
 776	struct dma_async_tx_descriptor *desc = NULL;
 777	struct scatterlist *sgl = host->data->sg;
 778	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
 779	u32 sg_elems = host->data->sg_len;
 780	u32 fifoth_val;
 781	u32 fifo_offset = host->fifo_reg - host->regs;
 782	int ret = 0;
 783
	/* Zero-init the config so fields we don't set below aren't stack garbage */
	memset(&cfg, 0, sizeof(cfg));

  784	/* Set external dma config: burst size, burst width */
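	/* The DMA engine needs the FIFO's bus address: phy_regs plus the FIFO register offset. */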
 785	cfg.dst_addr = host->phy_regs + fifo_offset;
 786	cfg.src_addr = cfg.dst_addr;
 787	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 788	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 789
 790	/* Match burst msize with external dma config */
 791	fifoth_val = mci_readl(host, FIFOTH);
 792	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
 793	cfg.src_maxburst = cfg.dst_maxburst;
 794
 795	if (host->data->flags & MMC_DATA_WRITE)
 796		cfg.direction = DMA_MEM_TO_DEV;
 797	else
 798		cfg.direction = DMA_DEV_TO_MEM;
 799
 800	ret = dmaengine_slave_config(host->dms->ch, &cfg);
 801	if (ret) {
 802		dev_err(host->dev, "Failed to config edmac.\n");
 803		return -EBUSY;
 804	}
 805
 806	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
 807				       sg_len, cfg.direction,
 808				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 809	if (!desc) {
 810		dev_err(host->dev, "Can't prepare slave sg.\n");
 811		return -EBUSY;
 812	}
 813
 814	/* Set dw_mci_dmac_complete_dma as callback */
 815	desc->callback = dw_mci_dmac_complete_dma;
 816	desc->callback_param = (void *)host;
 817	dmaengine_submit(desc);
 818
 819	/* Flush cache before write */
 820	if (host->data->flags & MMC_DATA_WRITE)
 821		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
 822				       sg_elems, DMA_TO_DEVICE);
 823
 824	dma_async_issue_pending(host->dms->ch);
 825
 826	return 0;
 827}
 828
 829static int dw_mci_edmac_init(struct dw_mci *host)
 830{
 831	/* Request external dma channel */
 832	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
 833	if (!host->dms)
 834		return -ENOMEM;
 835
 836	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
 837	if (IS_ERR(host->dms->ch)) {
 838		int ret = PTR_ERR(host->dms->ch);
 839
 840		dev_err(host->dev, "Failed to get external DMA channel.\n");
 841		kfree(host->dms);
 842		host->dms = NULL;
 843		return ret;
 844	}
 845
 846	return 0;
 847}
 848
 849static void dw_mci_edmac_exit(struct dw_mci *host)
 850{
 851	if (host->dms) {
 852		if (host->dms->ch) {
 853			dma_release_channel(host->dms->ch);
 854			host->dms->ch = NULL;
 855		}
 856		kfree(host->dms);
 857		host->dms = NULL;
 858	}
 859}
 860
 861static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
 862	.init = dw_mci_edmac_init,
 863	.exit = dw_mci_edmac_exit,
 864	.start = dw_mci_edmac_start_dma,
 865	.stop = dw_mci_edmac_stop_dma,
 866	.complete = dw_mci_dmac_complete_dma,
 867	.cleanup = dw_mci_dma_cleanup,
 868};
 869
 870static int dw_mci_pre_dma_transfer(struct dw_mci *host,
 871				   struct mmc_data *data,
 872				   int cookie)
 873{
 874	struct scatterlist *sg;
 875	unsigned int i, sg_len;
 876
 877	if (data->host_cookie == COOKIE_PRE_MAPPED)
 878		return data->sg_len;
 879
 880	/*
 881	 * We don't do DMA on "complex" transfers, i.e. with
 882	 * non-word-aligned buffers or lengths. Also, we don't bother
 883	 * with all the DMA setup overhead for short transfers.
 884	 */
 885	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
 886		return -EINVAL;
 887
 888	if (data->blksz & 3)
 889		return -EINVAL;
 890
 891	for_each_sg(data->sg, sg, data->sg_len, i) {
 892		if (sg->offset & 3 || sg->length & 3)
 893			return -EINVAL;
 894	}
 895
 896	sg_len = dma_map_sg(host->dev,
 897			    data->sg,
 898			    data->sg_len,
 899			    mmc_get_dma_dir(data));
 900	if (sg_len == 0)
 901		return -EINVAL;
 902
 903	data->host_cookie = cookie;
 904
 905	return sg_len;
 906}
 907
 908static void dw_mci_pre_req(struct mmc_host *mmc,
 909			   struct mmc_request *mrq)
 910{
 911	struct dw_mci_slot *slot = mmc_priv(mmc);
 912	struct mmc_data *data = mrq->data;
 913
 914	if (!slot->host->use_dma || !data)
 915		return;
 916
 917	/* This data might be unmapped at this time */
 918	data->host_cookie = COOKIE_UNMAPPED;
 919
 920	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
 921				COOKIE_PRE_MAPPED) < 0)
 922		data->host_cookie = COOKIE_UNMAPPED;
 923}
 924
 925static void dw_mci_post_req(struct mmc_host *mmc,
 926			    struct mmc_request *mrq,
 927			    int err)
 928{
 929	struct dw_mci_slot *slot = mmc_priv(mmc);
 930	struct mmc_data *data = mrq->data;
 931
 932	if (!slot->host->use_dma || !data)
 933		return;
 934
 935	if (data->host_cookie != COOKIE_UNMAPPED)
 936		dma_unmap_sg(slot->host->dev,
 937			     data->sg,
 938			     data->sg_len,
 939			     mmc_get_dma_dir(data));
 940	data->host_cookie = COOKIE_UNMAPPED;
 941}
 942
 943static int dw_mci_get_cd(struct mmc_host *mmc)
 944{
 945	int present;
 946	struct dw_mci_slot *slot = mmc_priv(mmc);
 947	struct dw_mci *host = slot->host;
 948	int gpio_cd = mmc_gpio_get_cd(mmc);
 949
 950	/* Use platform get_cd function, else try onboard card detect */
 951	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
 952				|| !mmc_card_is_removable(mmc))) {
 953		present = 1;
 954
 955		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
 956			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
 957				dev_info(&mmc->class_dev,
 958					"card is polling.\n");
 959			} else {
 960				dev_info(&mmc->class_dev,
 961					"card is non-removable.\n");
 962			}
 963			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
 964		}
 965
 966		return present;
 967	} else if (gpio_cd >= 0)
 968		present = gpio_cd;
 969	else
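		/* CDETECT is active low: a cleared bit means a card is present */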
 970		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
 971			== 0 ? 1 : 0;
 972
 973	spin_lock_bh(&host->lock);
 974	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
 975		dev_dbg(&mmc->class_dev, "card is present\n");
 976	else if (!present &&
 977			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
 978		dev_dbg(&mmc->class_dev, "card is not present\n");
 979	spin_unlock_bh(&host->lock);
 980
 981	return present;
 982}
 983
 984static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
 985{
 986	unsigned int blksz = data->blksz;
 987	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
 988	u32 fifo_width = 1 << host->data_shift;
 989	u32 blksz_depth = blksz / fifo_width, fifoth_val;
 990	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
 991	int idx = ARRAY_SIZE(mszs) - 1;
 992
 993	/* pio should skip this scenario */
 994	if (!host->use_dma)
 995		return;
 996
 997	tx_wmark = (host->fifo_depth) / 2;
 998	tx_wmark_invers = host->fifo_depth - tx_wmark;
 999
1000	/*
1001	 * MSIZE is '1',
1002	 * if blksz is not a multiple of the FIFO width
1003	 */
1004	if (blksz % fifo_width)
1005		goto done;
1006
1007	do {
1008		if (!((blksz_depth % mszs[idx]) ||
1009		     (tx_wmark_invers % mszs[idx]))) {
1010			msize = idx;
1011			rx_wmark = mszs[idx] - 1;
1012			break;
1013		}
1014	} while (--idx > 0);
1015	/*
1016	 * If idx is '0', the loop won't be tried.
1017	 * Thus, the initial values are used.
1018	 */
1019done:
1020	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
1021	mci_writel(host, FIFOTH, fifoth_val);
1022}
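
/*
 * Worked example for dw_mci_adjust_fifoth() above (hypothetical
 * numbers): given a 32-entry FIFO of 32-bit words (fifo_depth = 32,
 * fifo_width = 4) and blksz = 512, blksz_depth = 128 and
 * tx_wmark = tx_wmark_invers = 16.  Scanning mszs[] downwards, 16
 * (mszs[3]) is the largest burst that divides both 128 and 16, so
 * msize = 3 and rx_wmark = 15.
 */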
1023
1024static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
1025{
1026	unsigned int blksz = data->blksz;
1027	u32 blksz_depth, fifo_depth;
1028	u16 thld_size;
1029	u8 enable;
1030
1031	/*
1032	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
1033	 * in the FIFO region, so we really shouldn't access it).
1034	 */
1035	if (host->verid < DW_MMC_240A ||
1036		(host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
1037		return;
1038
1039	/*
1040	 * The card write threshold was introduced in 2.80a;
1041	 * it's used when HS400 mode is enabled.
1042	 */
1043	if (data->flags & MMC_DATA_WRITE &&
1044		host->timing != MMC_TIMING_MMC_HS400)
1045		goto disable;
1046
1047	if (data->flags & MMC_DATA_WRITE)
1048		enable = SDMMC_CARD_WR_THR_EN;
1049	else
1050		enable = SDMMC_CARD_RD_THR_EN;
1051
1052	if (host->timing != MMC_TIMING_MMC_HS200 &&
1053	    host->timing != MMC_TIMING_UHS_SDR104 &&
1054	    host->timing != MMC_TIMING_MMC_HS400)
1055		goto disable;
1056
1057	blksz_depth = blksz / (1 << host->data_shift);
1058	fifo_depth = host->fifo_depth;
1059
1060	if (blksz_depth > fifo_depth)
1061		goto disable;
1062
1063	/*
1064	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
1065	 * If (blksz_depth) <  (fifo_depth >> 1), should be 'thld_size = blksz'
1066	 * Currently just choose blksz.
1067	 */
1068	thld_size = blksz;
1069	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
1070	return;
1071
1072disable:
1073	mci_writel(host, CDTHRCTL, 0);
1074}
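
/*
 * Example for dw_mci_ctrl_thld() above (hypothetical numbers): with
 * blksz = 512 and a 32-bit wide FIFO (data_shift = 2), blksz_depth is
 * 128, so the threshold is only armed when the whole block fits in the
 * FIFO (fifo_depth >= 128); the programmed thld_size is then simply the
 * 512-byte block size.
 */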
1075
1076static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
1077{
1078	unsigned long irqflags;
1079	int sg_len;
1080	u32 temp;
1081
1082	host->using_dma = 0;
1083
1084	/* If we don't have a channel, we can't do DMA */
1085	if (!host->use_dma)
1086		return -ENODEV;
1087
1088	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1089	if (sg_len < 0) {
1090		host->dma_ops->stop(host);
1091		return sg_len;
1092	}
1093
1094	host->using_dma = 1;
1095
1096	if (host->use_dma == TRANS_MODE_IDMAC)
1097		dev_vdbg(host->dev,
1098			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
1099			 (unsigned long)host->sg_cpu,
1100			 (unsigned long)host->sg_dma,
1101			 sg_len);
1102
1103	/*
1104	 * Decide the MSIZE and RX/TX Watermark.
1105	 * If current block size is same with previous size,
1106	 * no need to update fifoth.
1107	 */
1108	if (host->prev_blksz != data->blksz)
1109		dw_mci_adjust_fifoth(host, data);
1110
1111	/* Enable the DMA interface */
1112	temp = mci_readl(host, CTRL);
1113	temp |= SDMMC_CTRL_DMA_ENABLE;
1114	mci_writel(host, CTRL, temp);
1115
1116	/* Disable RX/TX IRQs, let DMA handle it */
1117	spin_lock_irqsave(&host->irq_lock, irqflags);
1118	temp = mci_readl(host, INTMASK);
1119	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
1120	mci_writel(host, INTMASK, temp);
1121	spin_unlock_irqrestore(&host->irq_lock, irqflags);
1122
1123	if (host->dma_ops->start(host, sg_len)) {
1124		host->dma_ops->stop(host);
1125		/* We can't do DMA, try PIO for this one */
1126		dev_dbg(host->dev,
1127			"%s: fall back to PIO mode for current transfer\n",
1128			__func__);
1129		return -ENODEV;
1130	}
1131
1132	return 0;
1133}
1134
1135static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
1136{
1137	unsigned long irqflags;
1138	int flags = SG_MITER_ATOMIC;
1139	u32 temp;
1140
1141	data->error = -EINPROGRESS;
1142
1143	WARN_ON(host->data);
1144	host->sg = NULL;
1145	host->data = data;
1146
1147	if (data->flags & MMC_DATA_READ)
1148		host->dir_status = DW_MCI_RECV_STATUS;
1149	else
1150		host->dir_status = DW_MCI_SEND_STATUS;
1151
1152	dw_mci_ctrl_thld(host, data);
1153
1154	if (dw_mci_submit_data_dma(host, data)) {
1155		if (host->data->flags & MMC_DATA_READ)
1156			flags |= SG_MITER_TO_SG;
1157		else
1158			flags |= SG_MITER_FROM_SG;
1159
1160		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1161		host->sg = data->sg;
1162		host->part_buf_start = 0;
1163		host->part_buf_count = 0;
1164
1165		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1166
1167		spin_lock_irqsave(&host->irq_lock, irqflags);
1168		temp = mci_readl(host, INTMASK);
1169		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1170		mci_writel(host, INTMASK, temp);
1171		spin_unlock_irqrestore(&host->irq_lock, irqflags);
1172
1173		temp = mci_readl(host, CTRL);
1174		temp &= ~SDMMC_CTRL_DMA_ENABLE;
1175		mci_writel(host, CTRL, temp);
1176
1177		/*
1178		 * Use the initial fifoth_val for PIO mode. If wm_aligned
1179		 * is set, we set the watermark to match the data size.
1180		 * Since the next issued data may be transferred by DMA,
1181		 * prev_blksz should be invalidated.
1182		 */
1183		if (host->wm_aligned)
1184			dw_mci_adjust_fifoth(host, data);
1185		else
1186			mci_writel(host, FIFOTH, host->fifoth_val);
1187		host->prev_blksz = 0;
1188	} else {
1189		/*
1190		 * Keep the current block size.
1191		 * It will be used to decide whether to update
1192		 * fifoth register next time.
1193		 */
1194		host->prev_blksz = data->blksz;
1195	}
1196}
1197
1198static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1199{
1200	struct dw_mci *host = slot->host;
1201	unsigned int clock = slot->clock;
1202	u32 div;
1203	u32 clk_en_a;
1204	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
1205
1206	/* We must continue to set bit 28 in CMD until the change is complete */
1207	if (host->state == STATE_WAITING_CMD11_DONE)
1208		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
1209
1210	slot->mmc->actual_clock = 0;
1211
1212	if (!clock) {
1213		mci_writel(host, CLKENA, 0);
1214		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1215	} else if (clock != host->current_speed || force_clkinit) {
1216		div = host->bus_hz / clock;
1217		if (host->bus_hz % clock && host->bus_hz > clock)
1218			/*
1219			 * move the + 1 after the divide to prevent
1220			 * over-clocking the card.
1221			 */
1222			div += 1;
1223
1224		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
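		/*
		 * Worked example (hypothetical numbers): with
		 * bus_hz = 100 MHz and a requested clock of 400 kHz,
		 * div is 250 before rounding and 125 after; CLKDIV = 125
		 * divides bus_hz by 2 * 125, giving exactly 400 kHz.
		 */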
1225
1226		if ((clock != slot->__clk_old &&
1227			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
1228			force_clkinit) {
 1229			/* Silence the verbose log if called from a PM context */
1230			if (!force_clkinit)
1231				dev_info(&slot->mmc->class_dev,
1232					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1233					 slot->id, host->bus_hz, clock,
1234					 div ? ((host->bus_hz / div) >> 1) :
1235					 host->bus_hz, div);
1236
1237			/*
1238			 * If card is polling, display the message only
1239			 * one time at boot time.
1240			 */
1241			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
1242					slot->mmc->f_min == clock)
1243				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
1244		}
1245
1246		/* disable clock */
1247		mci_writel(host, CLKENA, 0);
1248		mci_writel(host, CLKSRC, 0);
1249
1250		/* inform CIU */
1251		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1252
1253		/* set clock to desired speed */
1254		mci_writel(host, CLKDIV, div);
1255
1256		/* inform CIU */
1257		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1258
1259		/* enable clock; only low power if no SDIO */
1260		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1261		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
1262			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1263		mci_writel(host, CLKENA, clk_en_a);
1264
1265		/* inform CIU */
1266		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1267
1268		/* keep the last clock value that was requested from core */
1269		slot->__clk_old = clock;
1270		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
1271					  host->bus_hz;
1272	}
1273
1274	host->current_speed = clock;
1275
1276	/* Set the current slot bus width */
1277	mci_writel(host, CTYPE, (slot->ctype << slot->id));
1278}
1279
1280static void __dw_mci_start_request(struct dw_mci *host,
1281				   struct dw_mci_slot *slot,
1282				   struct mmc_command *cmd)
1283{
1284	struct mmc_request *mrq;
1285	struct mmc_data	*data;
1286	u32 cmdflags;
1287
1288	mrq = slot->mrq;
1289
1290	host->mrq = mrq;
1291
1292	host->pending_events = 0;
1293	host->completed_events = 0;
1294	host->cmd_status = 0;
1295	host->data_status = 0;
1296	host->dir_status = 0;
1297
1298	data = cmd->data;
1299	if (data) {
1300		mci_writel(host, TMOUT, 0xFFFFFFFF);
1301		mci_writel(host, BYTCNT, data->blksz*data->blocks);
1302		mci_writel(host, BLKSIZ, data->blksz);
1303	}
1304
1305	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1306
1307	/* this is the first command, send the initialization clock */
1308	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1309		cmdflags |= SDMMC_CMD_INIT;
1310
1311	if (data) {
1312		dw_mci_submit_data(host, data);
1313		wmb(); /* drain writebuffer */
1314	}
1315
1316	dw_mci_start_command(host, cmd, cmdflags);
1317
1318	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
1319		unsigned long irqflags;
1320
1321		/*
1322		 * Databook says to fail after 2ms w/ no response, but evidence
1323		 * shows that sometimes the cmd11 interrupt takes over 130ms.
1324		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
1325		 * is just about to roll over.
1326		 *
1327		 * We do this whole thing under spinlock and only if the
 1328		 * command hasn't already completed (indicating that the irq
1329		 * already ran so we don't want the timeout).
1330		 */
1331		spin_lock_irqsave(&host->irq_lock, irqflags);
1332		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1333			mod_timer(&host->cmd11_timer,
1334				jiffies + msecs_to_jiffies(500) + 1);
1335		spin_unlock_irqrestore(&host->irq_lock, irqflags);
1336	}
1337
1338	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
1339}
1340
1341static void dw_mci_start_request(struct dw_mci *host,
1342				 struct dw_mci_slot *slot)
1343{
1344	struct mmc_request *mrq = slot->mrq;
1345	struct mmc_command *cmd;
1346
1347	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1348	__dw_mci_start_request(host, slot, cmd);
1349}
1350
1351/* must be called with host->lock held */
1352static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1353				 struct mmc_request *mrq)
1354{
1355	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1356		 host->state);
1357
1358	slot->mrq = mrq;
1359
1360	if (host->state == STATE_WAITING_CMD11_DONE) {
1361		dev_warn(&slot->mmc->class_dev,
1362			 "Voltage change didn't complete\n");
1363		/*
1364		 * this case isn't expected to happen, so we can
1365		 * either crash here or just try to continue on
1366		 * in the closest possible state
1367		 */
1368		host->state = STATE_IDLE;
1369	}
1370
1371	if (host->state == STATE_IDLE) {
1372		host->state = STATE_SENDING_CMD;
1373		dw_mci_start_request(host, slot);
1374	} else {
1375		list_add_tail(&slot->queue_node, &host->queue);
1376	}
1377}
1378
1379static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1380{
1381	struct dw_mci_slot *slot = mmc_priv(mmc);
1382	struct dw_mci *host = slot->host;
1383
1384	WARN_ON(slot->mrq);
1385
1386	/*
1387	 * The check for card presence and queueing of the request must be
1388	 * atomic, otherwise the card could be removed in between and the
1389	 * request wouldn't fail until another card was inserted.
1390	 */
 
1391
1392	if (!dw_mci_get_cd(mmc)) {
1393		mrq->cmd->error = -ENOMEDIUM;
1394		mmc_request_done(mmc, mrq);
1395		return;
1396	}
1397
1398	spin_lock_bh(&host->lock);
1399
1400	dw_mci_queue_request(host, slot, mrq);
1401
1402	spin_unlock_bh(&host->lock);
1403}
1404
1405static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1406{
1407	struct dw_mci_slot *slot = mmc_priv(mmc);
1408	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1409	u32 regs;
1410	int ret;
1411
1412	switch (ios->bus_width) {
1413	case MMC_BUS_WIDTH_4:
1414		slot->ctype = SDMMC_CTYPE_4BIT;
1415		break;
1416	case MMC_BUS_WIDTH_8:
1417		slot->ctype = SDMMC_CTYPE_8BIT;
1418		break;
1419	default:
1420		/* set default 1 bit mode */
1421		slot->ctype = SDMMC_CTYPE_1BIT;
1422	}
1423
1424	regs = mci_readl(slot->host, UHS_REG);
1425
1426	/* DDR mode set */
1427	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
1428	    ios->timing == MMC_TIMING_UHS_DDR50 ||
1429	    ios->timing == MMC_TIMING_MMC_HS400)
1430		regs |= ((0x1 << slot->id) << 16);
1431	else
1432		regs &= ~((0x1 << slot->id) << 16);
1433
1434	mci_writel(slot->host, UHS_REG, regs);
1435	slot->host->timing = ios->timing;
1436
1437	/*
1438	 * Use mirror of ios->clock to prevent race with mmc
1439	 * core ios update when finding the minimum.
1440	 */
1441	slot->clock = ios->clock;
1442
1443	if (drv_data && drv_data->set_ios)
1444		drv_data->set_ios(slot->host, ios);
1445
1446	switch (ios->power_mode) {
1447	case MMC_POWER_UP:
1448		if (!IS_ERR(mmc->supply.vmmc)) {
1449			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1450					ios->vdd);
1451			if (ret) {
1452				dev_err(slot->host->dev,
1453					"failed to enable vmmc regulator\n");
 1454				/* return if we failed to turn on vmmc */
1455				return;
1456			}
1457		}
1458		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1459		regs = mci_readl(slot->host, PWREN);
1460		regs |= (1 << slot->id);
1461		mci_writel(slot->host, PWREN, regs);
1462		break;
1463	case MMC_POWER_ON:
1464		if (!slot->host->vqmmc_enabled) {
1465			if (!IS_ERR(mmc->supply.vqmmc)) {
1466				ret = regulator_enable(mmc->supply.vqmmc);
1467				if (ret < 0)
1468					dev_err(slot->host->dev,
1469						"failed to enable vqmmc\n");
1470				else
1471					slot->host->vqmmc_enabled = true;
1472
1473			} else {
1474				/* Keep track so we don't reset again */
1475				slot->host->vqmmc_enabled = true;
1476			}
1477
1478			/* Reset our state machine after powering on */
1479			dw_mci_ctrl_reset(slot->host,
1480					  SDMMC_CTRL_ALL_RESET_FLAGS);
1481		}
1482
1483		/* Adjust clock / bus width after power is up */
1484		dw_mci_setup_bus(slot, false);
1485
1486		break;
1487	case MMC_POWER_OFF:
1488		/* Turn clock off before power goes down */
1489		dw_mci_setup_bus(slot, false);
1490
1491		if (!IS_ERR(mmc->supply.vmmc))
1492			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1493
1494		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
1495			regulator_disable(mmc->supply.vqmmc);
1496		slot->host->vqmmc_enabled = false;
1497
1498		regs = mci_readl(slot->host, PWREN);
1499		regs &= ~(1 << slot->id);
1500		mci_writel(slot->host, PWREN, regs);
1501		break;
1502	default:
1503		break;
1504	}
1505
1506	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1507		slot->host->state = STATE_IDLE;
1508}
1509
1510static int dw_mci_card_busy(struct mmc_host *mmc)
1511{
1512	struct dw_mci_slot *slot = mmc_priv(mmc);
1513	u32 status;
1514
1515	/*
1516	 * Check the busy bit which is low when DAT[3:0]
1517	 * (the data lines) are 0000
1518	 */
1519	status = mci_readl(slot->host, STATUS);
1520
1521	return !!(status & SDMMC_STATUS_BUSY);
1522}
1523
1524static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1525{
1526	struct dw_mci_slot *slot = mmc_priv(mmc);
1527	struct dw_mci *host = slot->host;
1528	const struct dw_mci_drv_data *drv_data = host->drv_data;
1529	u32 uhs;
1530	u32 v18 = SDMMC_UHS_18V << slot->id;
1531	int ret;
1532
1533	if (drv_data && drv_data->switch_voltage)
1534		return drv_data->switch_voltage(mmc, ios);
1535
1536	/*
1537	 * Program the voltage.  Note that some instances of dw_mmc may use
1538	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
1539	 * does no harm but you need to set the regulator directly.  Try both.
1540	 */
1541	uhs = mci_readl(host, UHS_REG);
1542	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1543		uhs &= ~v18;
1544	else
1545		uhs |= v18;
1546
1547	if (!IS_ERR(mmc->supply.vqmmc)) {
1548		ret = mmc_regulator_set_vqmmc(mmc, ios);
1549		if (ret < 0) {
1550			dev_dbg(&mmc->class_dev,
1551					 "Regulator set error %d - %s V\n",
1552					 ret, uhs & v18 ? "1.8" : "3.3");
1553			return ret;
1554		}
1555	}
1556	mci_writel(host, UHS_REG, uhs);
1557
1558	return 0;
1559}
1560
1561static int dw_mci_get_ro(struct mmc_host *mmc)
1562{
1563	int read_only;
1564	struct dw_mci_slot *slot = mmc_priv(mmc);
1565	int gpio_ro = mmc_gpio_get_ro(mmc);
1566
1567	/* Use platform get_ro function, else try on board write protect */
1568	if (gpio_ro >= 0)
1569		read_only = gpio_ro;
1570	else
1571		read_only =
1572			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1573
1574	dev_dbg(&mmc->class_dev, "card is %s\n",
1575		read_only ? "read-only" : "read-write");
1576
1577	return read_only;
1578}
1579
1580static void dw_mci_hw_reset(struct mmc_host *mmc)
1581{
1582	struct dw_mci_slot *slot = mmc_priv(mmc);
1583	struct dw_mci *host = slot->host;
1584	int reset;
1585
1586	if (host->use_dma == TRANS_MODE_IDMAC)
1587		dw_mci_idmac_reset(host);
1588
1589	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
1590				     SDMMC_CTRL_FIFO_RESET))
1591		return;
1592
1593	/*
1594	 * According to eMMC spec, card reset procedure:
1595	 * tRstW >= 1us:   RST_n pulse width
1596	 * tRSCA >= 200us: RST_n to Command time
1597	 * tRSTH >= 1us:   RST_n high period
1598	 */
1599	reset = mci_readl(host, RST_N);
1600	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
1601	mci_writel(host, RST_N, reset);
1602	usleep_range(1, 2);
1603	reset |= SDMMC_RST_HWACTIVE << slot->id;
1604	mci_writel(host, RST_N, reset);
1605	usleep_range(200, 300);
1606}
1607
1608static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
1609{
1610	struct dw_mci_slot *slot = mmc_priv(mmc);
1611	struct dw_mci *host = slot->host;
1612
1613	/*
1614	 * Low power mode will stop the card clock when idle.  According to the
1615	 * description of the CLKENA register we should disable low power mode
1616	 * for SDIO cards if we need SDIO interrupts to work.
1617	 */
1618	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
1619		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1620		u32 clk_en_a_old;
1621		u32 clk_en_a;
1622
1623		clk_en_a_old = mci_readl(host, CLKENA);
1624
1625		if (card->type == MMC_TYPE_SDIO ||
1626		    card->type == MMC_TYPE_SD_COMBO) {
1627			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1628			clk_en_a = clk_en_a_old & ~clken_low_pwr;
1629		} else {
1630			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1631			clk_en_a = clk_en_a_old | clken_low_pwr;
1632		}
1633
1634		if (clk_en_a != clk_en_a_old) {
1635			mci_writel(host, CLKENA, clk_en_a);
1636			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1637				     SDMMC_CMD_PRV_DAT_WAIT, 0);
1638		}
1639	}
1640}
1641
1642static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
1643{
1644	struct dw_mci *host = slot->host;
1645	unsigned long irqflags;
1646	u32 int_mask;
1647
1648	spin_lock_irqsave(&host->irq_lock, irqflags);
1649
1650	/* Enable/disable Slot Specific SDIO interrupt */
1651	int_mask = mci_readl(host, INTMASK);
1652	if (enb)
1653		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1654	else
1655		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1656	mci_writel(host, INTMASK, int_mask);
1657
1658	spin_unlock_irqrestore(&host->irq_lock, irqflags);
1659}
1660
1661static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1662{
1663	struct dw_mci_slot *slot = mmc_priv(mmc);
1664	struct dw_mci *host = slot->host;
1665
1666	__dw_mci_enable_sdio_irq(slot, enb);
1667
1668	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
1669	if (enb)
1670		pm_runtime_get_noresume(host->dev);
1671	else
1672		pm_runtime_put_noidle(host->dev);
1673}
1674
1675static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
1676{
1677	struct dw_mci_slot *slot = mmc_priv(mmc);
1678
1679	__dw_mci_enable_sdio_irq(slot, 1);
1680}
1681
1682static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1683{
1684	struct dw_mci_slot *slot = mmc_priv(mmc);
1685	struct dw_mci *host = slot->host;
1686	const struct dw_mci_drv_data *drv_data = host->drv_data;
1687	int err = -EINVAL;
1688
1689	if (drv_data && drv_data->execute_tuning)
1690		err = drv_data->execute_tuning(slot, opcode);
1691	return err;
1692}
1693
1694static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1695				       struct mmc_ios *ios)
1696{
1697	struct dw_mci_slot *slot = mmc_priv(mmc);
1698	struct dw_mci *host = slot->host;
1699	const struct dw_mci_drv_data *drv_data = host->drv_data;
1700
1701	if (drv_data && drv_data->prepare_hs400_tuning)
1702		return drv_data->prepare_hs400_tuning(host, ios);
1703
1704	return 0;
1705}
1706
1707static bool dw_mci_reset(struct dw_mci *host)
1708{
1709	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
1710	bool ret = false;
1711	u32 status = 0;
1712
1713	/*
1714	 * Resetting generates a block interrupt, hence setting
1715	 * the scatter-gather pointer to NULL.
1716	 */
1717	if (host->sg) {
1718		sg_miter_stop(&host->sg_miter);
1719		host->sg = NULL;
1720	}
1721
1722	if (host->use_dma)
1723		flags |= SDMMC_CTRL_DMA_RESET;
1724
1725	if (dw_mci_ctrl_reset(host, flags)) {
1726		/*
1727		 * In all cases we clear the RAWINTS
1728		 * register to clear any interrupts.
1729		 */
1730		mci_writel(host, RINTSTS, 0xFFFFFFFF);
1731
1732		if (!host->use_dma) {
1733			ret = true;
1734			goto ciu_out;
1735		}
1736
1737		/* Wait for dma_req to be cleared */
1738		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
1739					      status,
1740					      !(status & SDMMC_STATUS_DMA_REQ),
1741					      1, 500 * USEC_PER_MSEC)) {
1742			dev_err(host->dev,
1743				"%s: Timeout waiting for dma_req to be cleared\n",
1744				__func__);
1745			goto ciu_out;
1746		}
1747
1748		/* when using DMA next we reset the fifo again */
1749		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
1750			goto ciu_out;
1751	} else {
1752		/* if the controller reset bit did clear, then set clock regs */
1753		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
1754			dev_err(host->dev,
1755				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1756				__func__);
1757			goto ciu_out;
1758		}
1759	}
1760
1761	if (host->use_dma == TRANS_MODE_IDMAC)
1762		/* It is also required that we reinit idmac */
1763		dw_mci_idmac_init(host);
1764
1765	ret = true;
1766
1767ciu_out:
1768	/* After a CTRL reset we need to have CIU set clock registers  */
1769	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
1770
1771	return ret;
1772}
1773
1774static const struct mmc_host_ops dw_mci_ops = {
1775	.request		= dw_mci_request,
1776	.pre_req		= dw_mci_pre_req,
1777	.post_req		= dw_mci_post_req,
1778	.set_ios		= dw_mci_set_ios,
1779	.get_ro			= dw_mci_get_ro,
1780	.get_cd			= dw_mci_get_cd,
1781	.hw_reset               = dw_mci_hw_reset,
1782	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
1783	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
1784	.execute_tuning		= dw_mci_execute_tuning,
1785	.card_busy		= dw_mci_card_busy,
1786	.start_signal_voltage_switch = dw_mci_switch_voltage,
1787	.init_card		= dw_mci_init_card,
1788	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
1789};
1790
1791static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1792	__releases(&host->lock)
1793	__acquires(&host->lock)
1794{
1795	struct dw_mci_slot *slot;
1796	struct mmc_host	*prev_mmc = host->slot->mmc;
1797
1798	WARN_ON(host->cmd || host->data);
1799
1800	host->slot->mrq = NULL;
1801	host->mrq = NULL;
1802	if (!list_empty(&host->queue)) {
1803		slot = list_entry(host->queue.next,
1804				  struct dw_mci_slot, queue_node);
1805		list_del(&slot->queue_node);
1806		dev_vdbg(host->dev, "list not empty: %s is next\n",
1807			 mmc_hostname(slot->mmc));
1808		host->state = STATE_SENDING_CMD;
1809		dw_mci_start_request(host, slot);
1810	} else {
1811		dev_vdbg(host->dev, "list empty\n");
1812
1813		if (host->state == STATE_SENDING_CMD11)
1814			host->state = STATE_WAITING_CMD11_DONE;
1815		else
1816			host->state = STATE_IDLE;
1817	}
1818
1819	spin_unlock(&host->lock);
1820	mmc_request_done(prev_mmc, mrq);
1821	spin_lock(&host->lock);
1822}
1823
1824static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1825{
1826	u32 status = host->cmd_status;
1827
1828	host->cmd_status = 0;
1829
1830	/* Read the response from the card (up to 16 bytes) */
1831	if (cmd->flags & MMC_RSP_PRESENT) {
1832		if (cmd->flags & MMC_RSP_136) {
1833			cmd->resp[3] = mci_readl(host, RESP0);
1834			cmd->resp[2] = mci_readl(host, RESP1);
1835			cmd->resp[1] = mci_readl(host, RESP2);
1836			cmd->resp[0] = mci_readl(host, RESP3);
1837		} else {
1838			cmd->resp[0] = mci_readl(host, RESP0);
1839			cmd->resp[1] = 0;
1840			cmd->resp[2] = 0;
1841			cmd->resp[3] = 0;
1842		}
1843	}
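
	/*
	 * Note the reversed indexing above: the mmc core expects resp[0]
	 * to hold the most significant word of a 136-bit response, while
	 * the controller's RESP0 register holds the least significant one.
	 */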
1844
1845	if (status & SDMMC_INT_RTO)
1846		cmd->error = -ETIMEDOUT;
1847	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1848		cmd->error = -EILSEQ;
1849	else if (status & SDMMC_INT_RESP_ERR)
1850		cmd->error = -EIO;
1851	else
1852		cmd->error = 0;
1853
1854	return cmd->error;
1855}
1856
1857static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1858{
1859	u32 status = host->data_status;
1860
1861	if (status & DW_MCI_DATA_ERROR_FLAGS) {
1862		if (status & SDMMC_INT_DRTO) {
1863			data->error = -ETIMEDOUT;
1864		} else if (status & SDMMC_INT_DCRC) {
1865			data->error = -EILSEQ;
1866		} else if (status & SDMMC_INT_EBE) {
1867			if (host->dir_status ==
1868				DW_MCI_SEND_STATUS) {
1869				/*
1870				 * No data CRC status was returned.
1871				 * The number of bytes transferred
1872				 * will be exaggerated in PIO mode.
1873				 */
1874				data->bytes_xfered = 0;
1875				data->error = -ETIMEDOUT;
1876			} else if (host->dir_status ==
1877					DW_MCI_RECV_STATUS) {
1878				data->error = -EILSEQ;
1879			}
1880		} else {
1881			/* SDMMC_INT_SBE is included */
1882			data->error = -EILSEQ;
1883		}
1884
1885		dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1886
1887		/*
1888		 * After an error, there may be data lingering
1889		 * in the FIFO
1890		 */
1891		dw_mci_reset(host);
1892	} else {
1893		data->bytes_xfered = data->blocks * data->blksz;
1894		data->error = 0;
1895	}
1896
1897	return data->error;
1898}
1899
1900static void dw_mci_set_drto(struct dw_mci *host)
1901{
1902	unsigned int drto_clks;
1903	unsigned int drto_div;
1904	unsigned int drto_ms;
1905	unsigned long irqflags;
1906
1907	drto_clks = mci_readl(host, TMOUT) >> 8;
1908	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
1909	if (drto_div == 0)
1910		drto_div = 1;
1911
1912	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
1913				   host->bus_hz);
1914
 1915	/* add a bit of spare time */
1916	drto_ms += 10;
1917
1918	spin_lock_irqsave(&host->irq_lock, irqflags);
1919	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
1920		mod_timer(&host->dto_timer,
1921			  jiffies + msecs_to_jiffies(drto_ms));
1922	spin_unlock_irqrestore(&host->irq_lock, irqflags);
1923}
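
/*
 * Worked example for dw_mci_set_drto() above (hypothetical numbers):
 * TMOUT programmed to 0xFFFFFFFF gives drto_clks = 0xFFFFFF; with
 * CLKDIV = 125 (drto_div = 250) and bus_hz = 100 MHz, the timeout is
 * 1000 * 16777215 * 250 / 100000000 ~= 41944 ms, so the backup software
 * timer fires about 42 seconds later.
 */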
1924
1925static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
1926{
1927	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1928		return false;
1929
1930	/*
1931	 * Really be certain that the timer has stopped.  This is a bit of
1932	 * paranoia and could only really happen if we had really bad
1933	 * interrupt latency and the interrupt routine and timeout were
1934	 * running concurrently so that the del_timer() in the interrupt
1935	 * handler couldn't run.
1936	 */
1937	WARN_ON(del_timer_sync(&host->cto_timer));
1938	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1939
1940	return true;
1941}
1942
1943static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
1944{
1945	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
1946		return false;
1947
1948	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
1949	WARN_ON(del_timer_sync(&host->dto_timer));
1950	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1951
1952	return true;
1953}
1954
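/*
 * A rough sketch of the request state machine driven by the tasklet
 * below:
 *
 *   SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP -> IDLE
 *
 * STATE_DATA_ERROR is a detour that rejoins at DATA_BUSY once the
 * transfer drains, and WAITING_CMD11_DONE parks the machine between
 * the phases of a voltage switch.
 */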
1955static void dw_mci_tasklet_func(unsigned long priv)
1956{
1957	struct dw_mci *host = (struct dw_mci *)priv;
1958	struct mmc_data	*data;
1959	struct mmc_command *cmd;
1960	struct mmc_request *mrq;
1961	enum dw_mci_state state;
1962	enum dw_mci_state prev_state;
1963	unsigned int err;
1964
1965	spin_lock(&host->lock);
1966
1967	state = host->state;
1968	data = host->data;
1969	mrq = host->mrq;
1970
1971	do {
1972		prev_state = state;
1973
1974		switch (state) {
1975		case STATE_IDLE:
1976		case STATE_WAITING_CMD11_DONE:
1977			break;
1978
1979		case STATE_SENDING_CMD11:
1980		case STATE_SENDING_CMD:
1981			if (!dw_mci_clear_pending_cmd_complete(host))
1982				break;
1983
1984			cmd = host->cmd;
1985			host->cmd = NULL;
1986			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1987			err = dw_mci_command_complete(host, cmd);
1988			if (cmd == mrq->sbc && !err) {
1989				__dw_mci_start_request(host, host->slot,
1990						       mrq->cmd);
1991				goto unlock;
1992			}
1993
1994			if (cmd->data && err) {
1995				/*
1996				 * During UHS tuning sequence, sending the stop
1997				 * command after the response CRC error would
1998				 * throw the system into a confused state
1999				 * causing all future tuning phases to report
2000				 * failure.
2001				 *
2002				 * In such case controller will move into a data
2003				 * transfer state after a response error or
2004				 * response CRC error. Let's let that finish
2005				 * before trying to send a stop, so we'll go to
2006				 * STATE_SENDING_DATA.
2007				 *
2008				 * Although letting the data transfer take place
2009				 * will waste a bit of time (we already know
2010				 * the command was bad), it can't cause any
2011				 * errors since it's possible it would have
2012				 * taken place anyway if this tasklet got
2013				 * delayed. Allowing the transfer to take place
2014				 * avoids races and keeps things simple.
2015				 */
2016				if (err != -ETIMEDOUT) {
2017					state = STATE_SENDING_DATA;
2018					continue;
2019				}
2020
2021				dw_mci_stop_dma(host);
2022				send_stop_abort(host, data);
2023				state = STATE_SENDING_STOP;
2024				break;
2025			}
2026
2027			if (!cmd->data || err) {
2028				dw_mci_request_end(host, mrq);
2029				goto unlock;
2030			}
2031
2032			prev_state = state = STATE_SENDING_DATA;
2033			fallthrough;
2034
2035		case STATE_SENDING_DATA:
2036			/*
2037			 * We could get a data error and never a transfer
2038			 * complete so we'd better check for it here.
2039			 *
2040			 * Note that we don't really care if we also got a
2041			 * transfer complete; stopping the DMA and sending an
2042			 * abort won't hurt.
2043			 */
2044			if (test_and_clear_bit(EVENT_DATA_ERROR,
2045					       &host->pending_events)) {
2046				dw_mci_stop_dma(host);
2047				if (!(host->data_status & (SDMMC_INT_DRTO |
2048							   SDMMC_INT_EBE)))
2049					send_stop_abort(host, data);
2050				state = STATE_DATA_ERROR;
2051				break;
2052			}
2053
2054			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2055						&host->pending_events)) {
2056				/*
2057				 * Set the DRTO timer in case all data-related
2058				 * interrupts don't arrive in time while reading.
2059				 */
2060				if (host->dir_status == DW_MCI_RECV_STATUS)
2061					dw_mci_set_drto(host);
2062				break;
2063			}
2064
2065			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2066
2067			/*
2068			 * Handle an EVENT_DATA_ERROR that might have shown up
2069			 * before the transfer completed.  This might not have
2070			 * been caught by the check above because the interrupt
2071			 * could have gone off between the previous check and
2072			 * the check for transfer complete.
2073			 *
2074			 * Technically this ought not be needed assuming we
2075			 * get a DATA_COMPLETE eventually (we'll notice the
2076			 * error and end the request), but it shouldn't hurt.
2077			 *
2078			 * This has the advantage of sending the stop command.
2079			 */
2080			if (test_and_clear_bit(EVENT_DATA_ERROR,
2081					       &host->pending_events)) {
2082				dw_mci_stop_dma(host);
2083				if (!(host->data_status & (SDMMC_INT_DRTO |
2084							   SDMMC_INT_EBE)))
2085					send_stop_abort(host, data);
2086				state = STATE_DATA_ERROR;
2087				break;
2088			}
2089			prev_state = state = STATE_DATA_BUSY;
2090
2091			fallthrough;
2092
2093		case STATE_DATA_BUSY:
2094			if (!dw_mci_clear_pending_data_complete(host)) {
2095				/*
2096				 * If the data error interrupt comes but the
2097				 * data over interrupt doesn't arrive within
2098				 * the given time while reading, set the DRTO.
2099				 */
2100				if (host->dir_status == DW_MCI_RECV_STATUS)
2101					dw_mci_set_drto(host);
2102				break;
2103			}
2104
2105			host->data = NULL;
2106			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2107			err = dw_mci_data_complete(host, data);
2108
2109			if (!err) {
2110				if (!data->stop || mrq->sbc) {
2111					if (mrq->sbc && data->stop)
2112						data->stop->error = 0;
2113					dw_mci_request_end(host, mrq);
2114					goto unlock;
2115				}
2116
 2117				/* stop command for open-ended transfer */
2118				if (data->stop)
2119					send_stop_abort(host, data);
2120			} else {
2121				/*
2122				 * If we don't have a command complete now we'll
2123				 * never get one since we just reset everything;
2124				 * better end the request.
2125				 *
2126				 * If we do have a command complete we'll fall
2127				 * through to the SENDING_STOP command and
2128				 * everything will be peachy keen.
2129				 */
2130				if (!test_bit(EVENT_CMD_COMPLETE,
2131					      &host->pending_events)) {
2132					host->cmd = NULL;
2133					dw_mci_request_end(host, mrq);
2134					goto unlock;
2135				}
2136			}
2137
2138			/*
 2139			 * If err is non-zero, the stop-abort
 2140			 * command has already been issued.
2141			 */
2142			prev_state = state = STATE_SENDING_STOP;
2143
2144			fallthrough;
2145
2146		case STATE_SENDING_STOP:
2147			if (!dw_mci_clear_pending_cmd_complete(host))
2148				break;
2149
2150			/* CMD error in data command */
2151			if (mrq->cmd->error && mrq->data)
2152				dw_mci_reset(host);
2153
2154			host->cmd = NULL;
2155			host->data = NULL;
2156
2157			if (!mrq->sbc && mrq->stop)
2158				dw_mci_command_complete(host, mrq->stop);
2159			else
2160				host->cmd_status = 0;
2161
2162			dw_mci_request_end(host, mrq);
2163			goto unlock;
2164
2165		case STATE_DATA_ERROR:
2166			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2167						&host->pending_events))
2168				break;
2169
2170			state = STATE_DATA_BUSY;
2171			break;
2172		}
2173	} while (state != prev_state);
2174
2175	host->state = state;
2176unlock:
2177	spin_unlock(&host->lock);
2178
2179}
2180
2181/* push final bytes to part_buf, only use during push */
2182static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2183{
2184	memcpy((void *)&host->part_buf, buf, cnt);
2185	host->part_buf_count = cnt;
2186}
2187
2188/* append bytes to part_buf, only use during push */
2189static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2190{
2191	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2192	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2193	host->part_buf_count += cnt;
2194	return cnt;
2195}
2196
2197/* pull first bytes from part_buf, only use during pull */
2198static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2199{
2200	cnt = min_t(int, cnt, host->part_buf_count);
2201	if (cnt) {
2202		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2203		       cnt);
2204		host->part_buf_count -= cnt;
2205		host->part_buf_start += cnt;
2206	}
2207	return cnt;
2208}
2209
2210/* pull final bytes from the part_buf, assuming it's just been filled */
2211static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2212{
2213	memcpy(buf, &host->part_buf, cnt);
2214	host->part_buf_start = cnt;
2215	host->part_buf_count = (1 << host->data_shift) - cnt;
2216}
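
/*
 * A sketch of the part_buf flow for a 16-bit FIFO (data_shift = 1):
 * pushing 5 bytes writes two 16-bit words to the FIFO and parks the odd
 * trailing byte in part_buf; the next push (or the end-of-data flush in
 * the push helpers below) pairs it up and writes it out.  The pull path
 * mirrors this using part_buf_start/part_buf_count.
 */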
2217
2218static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2219{
2220	struct mmc_data *data = host->data;
2221	int init_cnt = cnt;
2222
2223	/* try and push anything in the part_buf */
2224	if (unlikely(host->part_buf_count)) {
2225		int len = dw_mci_push_part_bytes(host, buf, cnt);
2226
2227		buf += len;
2228		cnt -= len;
2229		if (host->part_buf_count == 2) {
2230			mci_fifo_writew(host->fifo_reg, host->part_buf16);
2231			host->part_buf_count = 0;
2232		}
2233	}
2234#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2235	if (unlikely((unsigned long)buf & 0x1)) {
2236		while (cnt >= 2) {
2237			u16 aligned_buf[64];
2238			int len = min(cnt & -2, (int)sizeof(aligned_buf));
2239			int items = len >> 1;
2240			int i;
2241			/* memcpy from input buffer into aligned buffer */
2242			memcpy(aligned_buf, buf, len);
2243			buf += len;
2244			cnt -= len;
2245			/* push data from aligned buffer into fifo */
2246			for (i = 0; i < items; ++i)
2247				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2248		}
2249	} else
2250#endif
2251	{
2252		u16 *pdata = buf;
2253
2254		for (; cnt >= 2; cnt -= 2)
2255			mci_fifo_writew(host->fifo_reg, *pdata++);
2256		buf = pdata;
2257	}
2258	/* put anything remaining in the part_buf */
2259	if (cnt) {
2260		dw_mci_set_part_bytes(host, buf, cnt);
2261		 /* Push data if we have reached the expected data length */
2262		if ((data->bytes_xfered + init_cnt) ==
2263		    (data->blksz * data->blocks))
2264			mci_fifo_writew(host->fifo_reg, host->part_buf16);
2265	}
2266}
2267
2268static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2269{
2270#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2271	if (unlikely((unsigned long)buf & 0x1)) {
2272		while (cnt >= 2) {
2273			/* pull data from fifo into aligned buffer */
2274			u16 aligned_buf[64];
2275			int len = min(cnt & -2, (int)sizeof(aligned_buf));
2276			int items = len >> 1;
2277			int i;
2278
2279			for (i = 0; i < items; ++i)
2280				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2281			/* memcpy from aligned buffer into output buffer */
2282			memcpy(buf, aligned_buf, len);
2283			buf += len;
2284			cnt -= len;
2285		}
2286	} else
2287#endif
2288	{
2289		u16 *pdata = buf;
2290
2291		for (; cnt >= 2; cnt -= 2)
2292			*pdata++ = mci_fifo_readw(host->fifo_reg);
2293		buf = pdata;
2294	}
2295	if (cnt) {
2296		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2297		dw_mci_pull_final_bytes(host, buf, cnt);
2298	}
2299}
2300
2301static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2302{
2303	struct mmc_data *data = host->data;
2304	int init_cnt = cnt;
2305
2306	/* try and push anything in the part_buf */
2307	if (unlikely(host->part_buf_count)) {
2308		int len = dw_mci_push_part_bytes(host, buf, cnt);
2309
2310		buf += len;
2311		cnt -= len;
2312		if (host->part_buf_count == 4) {
2313			mci_fifo_writel(host->fifo_reg,	host->part_buf32);
2314			host->part_buf_count = 0;
2315		}
2316	}
2317#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2318	if (unlikely((unsigned long)buf & 0x3)) {
2319		while (cnt >= 4) {
2320			u32 aligned_buf[32];
2321			int len = min(cnt & -4, (int)sizeof(aligned_buf));
2322			int items = len >> 2;
2323			int i;
2324			/* memcpy from input buffer into aligned buffer */
2325			memcpy(aligned_buf, buf, len);
2326			buf += len;
2327			cnt -= len;
2328			/* push data from aligned buffer into fifo */
2329			for (i = 0; i < items; ++i)
2330				mci_fifo_writel(host->fifo_reg,	aligned_buf[i]);
2331		}
2332	} else
2333#endif
2334	{
2335		u32 *pdata = buf;
2336
2337		for (; cnt >= 4; cnt -= 4)
2338			mci_fifo_writel(host->fifo_reg, *pdata++);
2339		buf = pdata;
2340	}
2341	/* put anything remaining in the part_buf */
2342	if (cnt) {
2343		dw_mci_set_part_bytes(host, buf, cnt);
2344		 /* Push data if we have reached the expected data length */
2345		if ((data->bytes_xfered + init_cnt) ==
2346		    (data->blksz * data->blocks))
2347			mci_fifo_writel(host->fifo_reg, host->part_buf32);
2348	}
2349}
2350
2351static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2352{
2353#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2354	if (unlikely((unsigned long)buf & 0x3)) {
2355		while (cnt >= 4) {
2356			/* pull data from fifo into aligned buffer */
2357			u32 aligned_buf[32];
2358			int len = min(cnt & -4, (int)sizeof(aligned_buf));
2359			int items = len >> 2;
2360			int i;
2361
2362			for (i = 0; i < items; ++i)
2363				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2364			/* memcpy from aligned buffer into output buffer */
2365			memcpy(buf, aligned_buf, len);
2366			buf += len;
2367			cnt -= len;
2368		}
2369	} else
2370#endif
2371	{
2372		u32 *pdata = buf;
2373
2374		for (; cnt >= 4; cnt -= 4)
2375			*pdata++ = mci_fifo_readl(host->fifo_reg);
2376		buf = pdata;
2377	}
2378	if (cnt) {
2379		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2380		dw_mci_pull_final_bytes(host, buf, cnt);
2381	}
2382}
2383
2384static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2385{
2386	struct mmc_data *data = host->data;
2387	int init_cnt = cnt;
2388
2389	/* try and push anything in the part_buf */
2390	if (unlikely(host->part_buf_count)) {
2391		int len = dw_mci_push_part_bytes(host, buf, cnt);
2392
2393		buf += len;
2394		cnt -= len;
2395
2396		if (host->part_buf_count == 8) {
2397			mci_fifo_writeq(host->fifo_reg,	host->part_buf);
2398			host->part_buf_count = 0;
2399		}
2400	}
2401#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2402	if (unlikely((unsigned long)buf & 0x7)) {
2403		while (cnt >= 8) {
2404			u64 aligned_buf[16];
2405			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2406			int items = len >> 3;
2407			int i;
2408			/* memcpy from input buffer into aligned buffer */
2409			memcpy(aligned_buf, buf, len);
2410			buf += len;
2411			cnt -= len;
2412			/* push data from aligned buffer into fifo */
2413			for (i = 0; i < items; ++i)
2414				mci_fifo_writeq(host->fifo_reg,	aligned_buf[i]);
2415		}
2416	} else
2417#endif
2418	{
2419		u64 *pdata = buf;
2420
2421		for (; cnt >= 8; cnt -= 8)
2422			mci_fifo_writeq(host->fifo_reg, *pdata++);
2423		buf = pdata;
2424	}
2425	/* put anything remaining in the part_buf */
2426	if (cnt) {
2427		dw_mci_set_part_bytes(host, buf, cnt);
2428		/* Push data if we have reached the expected data length */
2429		if ((data->bytes_xfered + init_cnt) ==
2430		    (data->blksz * data->blocks))
2431			mci_fifo_writeq(host->fifo_reg, host->part_buf);
2432	}
2433}
2434
2435static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2436{
2437#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2438	if (unlikely((unsigned long)buf & 0x7)) {
2439		while (cnt >= 8) {
2440			/* pull data from fifo into aligned buffer */
2441			u64 aligned_buf[16];
2442			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2443			int items = len >> 3;
2444			int i;
2445
2446			for (i = 0; i < items; ++i)
2447				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2448
2449			/* memcpy from aligned buffer into output buffer */
2450			memcpy(buf, aligned_buf, len);
2451			buf += len;
2452			cnt -= len;
2453		}
2454	} else
2455#endif
2456	{
2457		u64 *pdata = buf;
2458
2459		for (; cnt >= 8; cnt -= 8)
2460			*pdata++ = mci_fifo_readq(host->fifo_reg);
2461		buf = pdata;
2462	}
2463	if (cnt) {
2464		host->part_buf = mci_fifo_readq(host->fifo_reg);
2465		dw_mci_pull_final_bytes(host, buf, cnt);
2466	}
2467}
2468
2469static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2470{
2471	int len;
2472
2473	/* get remaining partial bytes */
2474	len = dw_mci_pull_part_bytes(host, buf, cnt);
2475	if (unlikely(len == cnt))
2476		return;
2477	buf += len;
2478	cnt -= len;
2479
2480	/* get the rest of the data */
2481	host->pull_data(host, buf, cnt);
2482}
2483
2484static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2485{
2486	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2487	void *buf;
2488	unsigned int offset;
2489	struct mmc_data	*data = host->data;
2490	int shift = host->data_shift;
2491	u32 status;
2492	unsigned int len;
2493	unsigned int remain, fcnt;
2494
2495	do {
2496		if (!sg_miter_next(sg_miter))
2497			goto done;
2498
2499		host->sg = sg_miter->piter.sg;
2500		buf = sg_miter->addr;
2501		remain = sg_miter->length;
2502		offset = 0;
2503
2504		do {
2505			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2506					<< shift) + host->part_buf_count;
2507			len = min(remain, fcnt);
2508			if (!len)
2509				break;
2510			dw_mci_pull_data(host, (void *)(buf + offset), len);
2511			data->bytes_xfered += len;
2512			offset += len;
2513			remain -= len;
2514		} while (remain);
2515
2516		sg_miter->consumed = offset;
2517		status = mci_readl(host, MINTSTS);
2518		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
 2519	/* if the RXDR is ready, read again */
2520	} while ((status & SDMMC_INT_RXDR) ||
2521		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2522
2523	if (!remain) {
2524		if (!sg_miter_next(sg_miter))
2525			goto done;
2526		sg_miter->consumed = 0;
2527	}
2528	sg_miter_stop(sg_miter);
2529	return;
2530
2531done:
2532	sg_miter_stop(sg_miter);
2533	host->sg = NULL;
2534	smp_wmb(); /* drain writebuffer */
2535	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2536}
2537
2538static void dw_mci_write_data_pio(struct dw_mci *host)
2539{
2540	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2541	void *buf;
2542	unsigned int offset;
2543	struct mmc_data	*data = host->data;
2544	int shift = host->data_shift;
2545	u32 status;
2546	unsigned int len;
2547	unsigned int fifo_depth = host->fifo_depth;
2548	unsigned int remain, fcnt;
2549
2550	do {
2551		if (!sg_miter_next(sg_miter))
2552			goto done;
2553
2554		host->sg = sg_miter->piter.sg;
2555		buf = sg_miter->addr;
2556		remain = sg_miter->length;
2557		offset = 0;
2558
2559		do {
2560			fcnt = ((fifo_depth -
2561				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2562					<< shift) - host->part_buf_count;
2563			len = min(remain, fcnt);
2564			if (!len)
2565				break;
2566			host->push_data(host, (void *)(buf + offset), len);
2567			data->bytes_xfered += len;
2568			offset += len;
2569			remain -= len;
2570		} while (remain);
2571
2572		sg_miter->consumed = offset;
2573		status = mci_readl(host, MINTSTS);
2574		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2575	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2576
2577	if (!remain) {
2578		if (!sg_miter_next(sg_miter))
2579			goto done;
2580		sg_miter->consumed = 0;
2581	}
2582	sg_miter_stop(sg_miter);
2583	return;
2584
2585done:
2586	sg_miter_stop(sg_miter);
2587	host->sg = NULL;
2588	smp_wmb(); /* drain writebuffer */
2589	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2590}
2591
2592static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2593{
2594	del_timer(&host->cto_timer);
2595
2596	if (!host->cmd_status)
2597		host->cmd_status = status;
2598
2599	smp_wmb(); /* drain writebuffer */
2600
2601	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2602	tasklet_schedule(&host->tasklet);
2603}
2604
2605static void dw_mci_handle_cd(struct dw_mci *host)
2606{
2607	struct dw_mci_slot *slot = host->slot;
2608
2609	if (slot->mmc->ops->card_event)
2610		slot->mmc->ops->card_event(slot->mmc);
2611	mmc_detect_change(slot->mmc,
2612		msecs_to_jiffies(host->pdata->detect_delay_ms));
2613}
2614
2615static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2616{
2617	struct dw_mci *host = dev_id;
2618	u32 pending;
2619	struct dw_mci_slot *slot = host->slot;
2620	unsigned long irqflags;
2621
2622	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2623
2624	if (pending) {
2625		/* Check volt switch first, since it can look like an error */
2626		if ((host->state == STATE_SENDING_CMD11) &&
2627		    (pending & SDMMC_INT_VOLT_SWITCH)) {
2628			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2629			pending &= ~SDMMC_INT_VOLT_SWITCH;
2630
2631			/*
2632			 * Hold the lock; we know cmd11_timer can't be kicked
2633			 * off after the lock is released, so safe to delete.
2634			 */
2635			spin_lock_irqsave(&host->irq_lock, irqflags);
2636			dw_mci_cmd_interrupt(host, pending);
2637			spin_unlock_irqrestore(&host->irq_lock, irqflags);
2638
2639			del_timer(&host->cmd11_timer);
2640		}
2641
2642		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2643			spin_lock_irqsave(&host->irq_lock, irqflags);
2644
2645			del_timer(&host->cto_timer);
2646			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2647			host->cmd_status = pending;
2648			smp_wmb(); /* drain writebuffer */
2649			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2650
2651			spin_unlock_irqrestore(&host->irq_lock, irqflags);
2652		}
2653
2654		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
 2655			/* if there is an error, report DATA_ERROR */
2656			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2657			host->data_status = pending;
2658			smp_wmb(); /* drain writebuffer */
2659			set_bit(EVENT_DATA_ERROR, &host->pending_events);
2660			tasklet_schedule(&host->tasklet);
2661		}
2662
2663		if (pending & SDMMC_INT_DATA_OVER) {
2664			spin_lock_irqsave(&host->irq_lock, irqflags);
2665
2666			del_timer(&host->dto_timer);
2667
2668			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2669			if (!host->data_status)
2670				host->data_status = pending;
2671			smp_wmb(); /* drain writebuffer */
2672			if (host->dir_status == DW_MCI_RECV_STATUS) {
2673				if (host->sg != NULL)
2674					dw_mci_read_data_pio(host, true);
2675			}
2676			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2677			tasklet_schedule(&host->tasklet);
2678
2679			spin_unlock_irqrestore(&host->irq_lock, irqflags);
2680		}
2681
2682		if (pending & SDMMC_INT_RXDR) {
2683			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2684			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2685				dw_mci_read_data_pio(host, false);
2686		}
2687
2688		if (pending & SDMMC_INT_TXDR) {
2689			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2690			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2691				dw_mci_write_data_pio(host);
2692		}
2693
2694		if (pending & SDMMC_INT_CMD_DONE) {
2695			spin_lock_irqsave(&host->irq_lock, irqflags);
2696
2697			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2698			dw_mci_cmd_interrupt(host, pending);
2699
2700			spin_unlock_irqrestore(&host->irq_lock, irqflags);
2701		}
2702
2703		if (pending & SDMMC_INT_CD) {
2704			mci_writel(host, RINTSTS, SDMMC_INT_CD);
2705			dw_mci_handle_cd(host);
2706		}
2707
2708		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2709			mci_writel(host, RINTSTS,
2710				   SDMMC_INT_SDIO(slot->sdio_id));
2711			__dw_mci_enable_sdio_irq(slot, 0);
2712			sdio_signal_irq(slot->mmc);
2713		}
2714
2715	}
2716
2717	if (host->use_dma != TRANS_MODE_IDMAC)
2718		return IRQ_HANDLED;
2719
2720	/* Handle IDMA interrupts */
2721	if (host->dma_64bit_address == 1) {
2722		pending = mci_readl(host, IDSTS64);
2723		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2724			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2725							SDMMC_IDMAC_INT_RI);
2726			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2727			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2728				host->dma_ops->complete((void *)host);
2729		}
2730	} else {
2731		pending = mci_readl(host, IDSTS);
2732		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2733			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2734							SDMMC_IDMAC_INT_RI);
2735			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2736			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2737				host->dma_ops->complete((void *)host);
2738		}
2739	}
2740
2741	return IRQ_HANDLED;
2742}
2743
2744static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2745{
2746	struct dw_mci *host = slot->host;
2747	const struct dw_mci_drv_data *drv_data = host->drv_data;
2748	struct mmc_host *mmc = slot->mmc;
2749	int ctrl_id;
2750
2751	if (host->pdata->caps)
2752		mmc->caps = host->pdata->caps;
2753
2754	if (host->pdata->pm_caps)
2755		mmc->pm_caps = host->pdata->pm_caps;
2756
2757	if (host->dev->of_node) {
2758		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2759		if (ctrl_id < 0)
2760			ctrl_id = 0;
2761	} else {
2762		ctrl_id = to_platform_device(host->dev)->id;
2763	}
2764
2765	if (drv_data && drv_data->caps) {
2766		if (ctrl_id >= drv_data->num_caps) {
2767			dev_err(host->dev, "invalid controller id %d\n",
2768				ctrl_id);
2769			return -EINVAL;
2770		}
2771		mmc->caps |= drv_data->caps[ctrl_id];
2772	}
2773
2774	if (host->pdata->caps2)
2775		mmc->caps2 = host->pdata->caps2;
2776
2777	mmc->f_min = DW_MCI_FREQ_MIN;
2778	if (!mmc->f_max)
2779		mmc->f_max = DW_MCI_FREQ_MAX;
2780
2781	/* Process SDIO IRQs through the sdio_irq_work. */
2782	if (mmc->caps & MMC_CAP_SDIO_IRQ)
2783		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2784
2785	return 0;
2786}
2787
2788static int dw_mci_init_slot(struct dw_mci *host)
2789{
2790	struct mmc_host *mmc;
2791	struct dw_mci_slot *slot;
2792	int ret;
2793
2794	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2795	if (!mmc)
2796		return -ENOMEM;
2797
2798	slot = mmc_priv(mmc);
2799	slot->id = 0;
2800	slot->sdio_id = host->sdio_id0 + slot->id;
2801	slot->mmc = mmc;
2802	slot->host = host;
2803	host->slot = slot;
2804
2805	mmc->ops = &dw_mci_ops;
2806
 2807	/* if there are external regulators, get them */
2808	ret = mmc_regulator_get_supply(mmc);
2809	if (ret)
2810		goto err_host_allocated;
2811
2812	if (!mmc->ocr_avail)
2813		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2814
2815	ret = mmc_of_parse(mmc);
2816	if (ret)
2817		goto err_host_allocated;
2818
2819	ret = dw_mci_init_slot_caps(slot);
2820	if (ret)
2821		goto err_host_allocated;
2822
2823	/* Useful defaults if platform data is unset. */
2824	if (host->use_dma == TRANS_MODE_IDMAC) {
2825		mmc->max_segs = host->ring_size;
2826		mmc->max_blk_size = 65535;
2827		mmc->max_seg_size = 0x1000;
2828		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2829		mmc->max_blk_count = mmc->max_req_size / 512;
2830	} else if (host->use_dma == TRANS_MODE_EDMAC) {
2831		mmc->max_segs = 64;
2832		mmc->max_blk_size = 65535;
2833		mmc->max_blk_count = 65535;
2834		mmc->max_req_size =
2835				mmc->max_blk_size * mmc->max_blk_count;
2836		mmc->max_seg_size = mmc->max_req_size;
2837	} else {
2838		/* TRANS_MODE_PIO */
2839		mmc->max_segs = 64;
2840		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2841		mmc->max_blk_count = 512;
2842		mmc->max_req_size = mmc->max_blk_size *
2843				    mmc->max_blk_count;
2844		mmc->max_seg_size = mmc->max_req_size;
2845	}
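
	/*
	 * Example for the IDMAC limits above (hypothetical ring_size of
	 * 128): 128 segments of 4 KiB each allow one 512 KiB request,
	 * i.e. 1024 blocks of 512 bytes.
	 */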
2846
2847	dw_mci_get_cd(mmc);
2848
2849	ret = mmc_add_host(mmc);
2850	if (ret)
2851		goto err_host_allocated;
2852
2853#if defined(CONFIG_DEBUG_FS)
2854	dw_mci_init_debugfs(slot);
2855#endif
2856
2857	return 0;
2858
2859err_host_allocated:
2860	mmc_free_host(mmc);
2861	return ret;
2862}
2863
2864static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
2865{
2866	/* Debugfs stuff is cleaned up by mmc core */
2867	mmc_remove_host(slot->mmc);
2868	slot->host->slot = NULL;
2869	mmc_free_host(slot->mmc);
2870}
2871
2872static void dw_mci_init_dma(struct dw_mci *host)
2873{
2874	int addr_config;
2875	struct device *dev = host->dev;
2876
2877	/*
2878	* Check transfer mode from HCON[17:16]
2879	* This clarifies the ambiguous description in the dw_mmc databook:
2880	* 2b'00: No DMA Interface -> Actually means using Internal DMA block
2881	* 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
2882	* 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
2883	* 2b'11: Non DW DMA Interface -> pio only
2884	* Compared to DesignWare DMA Interface, Generic DMA Interface has a
2885	* simpler request/acknowledge handshake mechanism and both of them
2886	* are regarded as external dma master for dw_mmc.
2887	*/
2888	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
2889	if (host->use_dma == DMA_INTERFACE_IDMA) {
2890		host->use_dma = TRANS_MODE_IDMAC;
2891	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
2892		   host->use_dma == DMA_INTERFACE_GDMA) {
2893		host->use_dma = TRANS_MODE_EDMAC;
2894	} else {
2895		goto no_dma;
2896	}
2897
2898	/* Determine which DMA interface to use */
2899	if (host->use_dma == TRANS_MODE_IDMAC) {
2900		/*
2901		 * Check the ADDR_CONFIG bit in HCON to find the
2902		 * IDMAC address bus width.
2903		 */
2904		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
2905
2906		if (addr_config == 1) {
2907			/* host supports IDMAC in 64-bit address mode */
2908			host->dma_64bit_address = 1;
2909			dev_info(host->dev,
2910				 "IDMAC supports 64-bit address mode.\n");
2911			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2912				dma_set_coherent_mask(host->dev,
2913						      DMA_BIT_MASK(64));
2914		} else {
2915			/* host supports IDMAC in 32-bit address mode */
2916			host->dma_64bit_address = 0;
2917			dev_info(host->dev,
2918				 "IDMAC supports 32-bit address mode.\n");
2919		}
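		/*
		 * Note: if the 64-bit mask was not requested or was rejected
		 * by dma_set_mask(), the device keeps its default 32-bit DMA
		 * mask, so descriptors and buffers stay below 4 GiB.
		 */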
2920
2921		/* Alloc memory for sg translation */
2922		host->sg_cpu = dmam_alloc_coherent(host->dev,
2923						   DESC_RING_BUF_SZ,
2924						   &host->sg_dma, GFP_KERNEL);
2925		if (!host->sg_cpu) {
2926			dev_err(host->dev,
2927				"%s: could not alloc DMA memory\n",
2928				__func__);
2929			goto no_dma;
2930		}
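		/*
		 * dmam_alloc_coherent() is device-managed: the descriptor
		 * ring is released automatically on driver detach, so
		 * dw_mci_remove() needs no matching free.
		 */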
2931
2932		host->dma_ops = &dw_mci_idmac_ops;
2933		dev_info(host->dev, "Using internal DMA controller.\n");
2934	} else {
2935		/* TRANS_MODE_EDMAC: check dma bindings again */
2936		if ((device_property_read_string_array(dev, "dma-names",
2937						       NULL, 0) < 0) ||
2938		    !device_property_present(dev, "dmas")) {
2939			goto no_dma;
2940		}
2941		host->dma_ops = &dw_mci_edmac_ops;
2942		dev_info(host->dev, "Using external DMA controller.\n");
2943	}
2944
2945	if (host->dma_ops->init && host->dma_ops->start &&
2946	    host->dma_ops->stop && host->dma_ops->cleanup) {
2947		if (host->dma_ops->init(host)) {
2948			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
2949				__func__);
2950			goto no_dma;
2951		}
2952	} else {
2953		dev_err(host->dev, "DMA initialization not found.\n");
2954		goto no_dma;
2955	}
2956
2957	return;
2958
2959no_dma:
2960	dev_info(host->dev, "Using PIO mode.\n");
2961	host->use_dma = TRANS_MODE_PIO;
2962}
2963
2964static void dw_mci_cmd11_timer(struct timer_list *t)
2965{
2966	struct dw_mci *host = from_timer(host, t, cmd11_timer);
2967
2968	if (host->state != STATE_SENDING_CMD11) {
2969		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
2970		return;
2971	}
2972
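	/*
	 * No CMD_DONE arrived for the voltage-switch command, so fake a
	 * response timeout; the tasklet state machine then tears down the
	 * request and reports the failure to the core.
	 */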
2973	host->cmd_status = SDMMC_INT_RTO;
2974	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2975	tasklet_schedule(&host->tasklet);
2976}
2977
2978static void dw_mci_cto_timer(struct timer_list *t)
2979{
2980	struct dw_mci *host = from_timer(host, t, cto_timer);
2981	unsigned long irqflags;
2982	u32 pending;
2983
2984	spin_lock_irqsave(&host->irq_lock, irqflags);
2985
2986	/*
2987	 * If somehow we have very bad interrupt latency it's remotely possible
2988	 * that the timer could fire while the interrupt is still pending or
2989	 * while the interrupt is midway through running.  Let's be paranoid
2990	 * and detect those two cases.  Note that this paranoia is somewhat
2991	 * justified because in this function we don't actually cancel the
2992	 * pending command in the controller--we just assume it will never come.
2993	 */
2994	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2995	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
2996		/* The interrupt should fire; no need to act but we can warn */
2997		dev_warn(host->dev, "Unexpected interrupt latency\n");
2998		goto exit;
2999	}
3000	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3001		/* Presumably interrupt handler couldn't delete the timer */
3002		dev_warn(host->dev, "CTO timeout when already completed\n");
3003		goto exit;
3004	}
3005
3006	/*
3007	 * Continued paranoia to make sure we're in the state we expect.
3008	 * This paranoia isn't really justified but it seems good to be safe.
3009	 */
3010	switch (host->state) {
3011	case STATE_SENDING_CMD11:
3012	case STATE_SENDING_CMD:
3013	case STATE_SENDING_STOP:
3014		/*
3015		 * If CMD_DONE interrupt does NOT come in sending command
3016		 * state, we should notify the driver to terminate current
3017		 * transfer and report a command timeout to the core.
3018		 */
3019		host->cmd_status = SDMMC_INT_RTO;
3020		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3021		tasklet_schedule(&host->tasklet);
3022		break;
3023	default:
3024		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3025			 host->state);
3026		break;
3027	}
3028
3029exit:
3030	spin_unlock_irqrestore(&host->irq_lock, irqflags);
3031}
3032
3033static void dw_mci_dto_timer(struct timer_list *t)
3034{
3035	struct dw_mci *host = from_timer(host, t, dto_timer);
3036	unsigned long irqflags;
3037	u32 pending;
3038
3039	spin_lock_irqsave(&host->irq_lock, irqflags);
3040
3041	/*
3042	 * The DTO timer is much longer than the CTO timer, so it's even less
3043	 * likely that we'll see these cases, but it pays to be paranoid.
3044	 */
3045	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3046	if (pending & SDMMC_INT_DATA_OVER) {
3047		/* The interrupt should fire; no need to act but we can warn */
3048		dev_warn(host->dev, "Unexpected data interrupt latency\n");
3049		goto exit;
3050	}
3051	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3052		/* Presumably interrupt handler couldn't delete the timer */
3053		dev_warn(host->dev, "DTO timeout when already completed\n");
3054		goto exit;
3055	}
3056
3057	/*
3058	 * Continued paranoia to make sure we're in the state we expect.
3059	 * This paranoia isn't really justified but it seems good to be safe.
3060	 */
3061	switch (host->state) {
3062	case STATE_SENDING_DATA:
3063	case STATE_DATA_BUSY:
3064		/*
3065		 * If DTO interrupt does NOT come in sending data state,
3066		 * we should notify the driver to terminate current transfer
3067		 * and report a data timeout to the core.
3068		 */
3069		host->data_status = SDMMC_INT_DRTO;
3070		set_bit(EVENT_DATA_ERROR, &host->pending_events);
3071		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3072		tasklet_schedule(&host->tasklet);
3073		break;
3074	default:
3075		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3076			 host->state);
3077		break;
3078	}
3079
3080exit:
3081	spin_unlock_irqrestore(&host->irq_lock, irqflags);
3082}
3083
3084#ifdef CONFIG_OF
3085static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3086{
3087	struct dw_mci_board *pdata;
3088	struct device *dev = host->dev;
3089	const struct dw_mci_drv_data *drv_data = host->drv_data;
3090	int ret;
3091	u32 clock_frequency;
3092
3093	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3094	if (!pdata)
3095		return ERR_PTR(-ENOMEM);
3096
3097	/* Find the reset controller, if one exists. */
3098	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3099	if (IS_ERR(pdata->rstc)) {
3100		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
3101			return ERR_PTR(-EPROBE_DEFER);
3102	}
3103
3104	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
3105		dev_info(dev,
3106			 "fifo-depth property not found, using value of FIFOTH register as default\n");
3107
3108	device_property_read_u32(dev, "card-detect-delay",
3109				 &pdata->detect_delay_ms);
3110
3111	device_property_read_u32(dev, "data-addr", &host->data_addr_override);
3112
3113	if (device_property_present(dev, "fifo-watermark-aligned"))
3114		host->wm_aligned = true;
3115
3116	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3117		pdata->bus_hz = clock_frequency;
3118
3119	if (drv_data && drv_data->parse_dt) {
3120		ret = drv_data->parse_dt(host);
3121		if (ret)
3122			return ERR_PTR(ret);
3123	}
3124
3125	return pdata;
3126}
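/*
 * Illustrative DT fragment (node name and values are examples only)
 * exercising the properties parsed above:
 *
 *	mmc@12200000 {
 *		resets = <&rst 0>;
 *		reset-names = "reset";
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		fifo-watermark-aligned;
 *		clock-frequency = <400000000>;
 *	};
 */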
3127
3128#else /* CONFIG_OF */
3129static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3130{
3131	return ERR_PTR(-EINVAL);
3132}
3133#endif /* CONFIG_OF */
3134
3135static void dw_mci_enable_cd(struct dw_mci *host)
3136{
3137	unsigned long irqflags;
3138	u32 temp;
3139
3140	/*
3141	 * No need for the CD interrupt if the slot has a working CD GPIO,
3142	 * or if broken card detection means the core polls instead.
3143	 */
3144	if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
3145		return;
3146
3147	if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3148		spin_lock_irqsave(&host->irq_lock, irqflags);
3149		temp = mci_readl(host, INTMASK);
3150		temp  |= SDMMC_INT_CD;
3151		mci_writel(host, INTMASK, temp);
3152		spin_unlock_irqrestore(&host->irq_lock, irqflags);
3153	}
3154}
3155
3156int dw_mci_probe(struct dw_mci *host)
3157{
3158	const struct dw_mci_drv_data *drv_data = host->drv_data;
3159	int width, i, ret = 0;
3160	u32 fifo_size;
3161
3162	if (!host->pdata) {
3163		host->pdata = dw_mci_parse_dt(host);
3164		if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
3165			return -EPROBE_DEFER;
3166		} else if (IS_ERR(host->pdata)) {
3167			dev_err(host->dev, "platform data not available\n");
3168			return -EINVAL;
3169		}
3170	}
3171
3172	host->biu_clk = devm_clk_get(host->dev, "biu");
3173	if (IS_ERR(host->biu_clk)) {
3174		dev_dbg(host->dev, "biu clock not available\n");
3175	} else {
3176		ret = clk_prepare_enable(host->biu_clk);
3177		if (ret) {
3178			dev_err(host->dev, "failed to enable biu clock\n");
3179			return ret;
3180		}
3181	}
3182
3183	host->ciu_clk = devm_clk_get(host->dev, "ciu");
3184	if (IS_ERR(host->ciu_clk)) {
3185		dev_dbg(host->dev, "ciu clock not available\n");
3186		host->bus_hz = host->pdata->bus_hz;
3187	} else {
3188		ret = clk_prepare_enable(host->ciu_clk);
3189		if (ret) {
3190			dev_err(host->dev, "failed to enable ciu clock\n");
3191			goto err_clk_biu;
3192		}
3193
3194		if (host->pdata->bus_hz) {
3195			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3196			if (ret)
3197				dev_warn(host->dev,
3198					 "Unable to set bus rate to %uHz\n",
3199					 host->pdata->bus_hz);
3200		}
3201		host->bus_hz = clk_get_rate(host->ciu_clk);
3202	}
3203
3204	if (!host->bus_hz) {
3205		dev_err(host->dev,
3206			"Platform data must supply bus speed\n");
3207		ret = -ENODEV;
3208		goto err_clk_ciu;
3209	}
3210
3211	if (!IS_ERR(host->pdata->rstc)) {
3212		reset_control_assert(host->pdata->rstc);
3213		usleep_range(10, 50);
3214		reset_control_deassert(host->pdata->rstc);
3215	}
3216
3217	if (drv_data && drv_data->init) {
3218		ret = drv_data->init(host);
3219		if (ret) {
3220			dev_err(host->dev,
3221				"implementation specific init failed\n");
3222			goto err_clk_ciu;
3223		}
3224	}
3225
3226	timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3227	timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3228	timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
3229
3230	spin_lock_init(&host->lock);
3231	spin_lock_init(&host->irq_lock);
3232	INIT_LIST_HEAD(&host->queue);
3233
3234	/*
3235	 * Get the host data width - this assumes that HCON has been set with
3236	 * the correct values.
3237	 */
3238	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
3239	if (!i) {
3240		host->push_data = dw_mci_push_data16;
3241		host->pull_data = dw_mci_pull_data16;
3242		width = 16;
3243		host->data_shift = 1;
3244	} else if (i == 2) {
3245		host->push_data = dw_mci_push_data64;
3246		host->pull_data = dw_mci_pull_data64;
3247		width = 64;
3248		host->data_shift = 3;
3249	} else {
3250		/* Check for a reserved value, and warn if it is */
3251		WARN((i != 1),
3252		     "HCON reports a reserved host data width!\n"
3253		     "Defaulting to 32-bit access.\n");
3254		host->push_data = dw_mci_push_data32;
3255		host->pull_data = dw_mci_pull_data32;
3256		width = 32;
3257		host->data_shift = 2;
3258	}
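	/*
	 * data_shift is log2 of the FIFO access width in bytes (1 = 16-bit,
	 * 2 = 32-bit, 3 = 64-bit); the PIO push/pull paths use it to turn
	 * byte counts into FIFO word counts.
	 */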
3259
3260	/* Reset all blocks */
3261	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3262		ret = -ENODEV;
3263		goto err_clk_ciu;
3264	}
3265
3266	host->dma_ops = host->pdata->dma_ops;
3267	dw_mci_init_dma(host);
3268
3269	/* Clear the interrupts for the host controller */
3270	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3271	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3272
3273	/* Put in max timeout */
3274	mci_writel(host, TMOUT, 0xFFFFFFFF);
3275
3276	/*
3277	 * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
3278	 * TX mark = fifo_size / 2, DMA burst size (MSIZE) = 8.
3279	 */
3280	if (!host->pdata->fifo_depth) {
3281		/*
3282		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3283		 * have been overwritten by the bootloader, just like we're
3284		 * about to do, so if you know the value for your hardware, you
3285		 * should put it in the platform data.
3286		 */
3287		fifo_size = mci_readl(host, FIFOTH);
3288		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3289	} else {
3290		fifo_size = host->pdata->fifo_depth;
3291	}
3292	host->fifo_depth = fifo_size;
3293	host->fifoth_val =
3294		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3295	mci_writel(host, FIFOTH, host->fifoth_val);
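	/*
	 * Worked example with a hypothetical 64-word FIFO: fifoth_val then
	 * encodes MSIZE = 0x2 (burst of 8), RX_WMark = 31 and TX_WMark = 32,
	 * matching the formula in the comment above.
	 */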
3296
3297	/* disable clock to CIU */
3298	mci_writel(host, CLKENA, 0);
3299	mci_writel(host, CLKSRC, 0);
3300
3301	/*
3302	 * The offset of the DATA register changed in the 2.40a spec, so
3303	 * check the version ID and set the data offset accordingly.
3304	 */
3305	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3306	dev_info(host->dev, "Version ID is %04x\n", host->verid);
3307
3308	if (host->data_addr_override)
3309		host->fifo_reg = host->regs + host->data_addr_override;
3310	else if (host->verid < DW_MMC_240A)
3311		host->fifo_reg = host->regs + DATA_OFFSET;
3312	else
3313		host->fifo_reg = host->regs + DATA_240A_OFFSET;
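	/*
	 * Per dw_mmc.h, DATA_OFFSET (0x100) applies to pre-2.40a IP while
	 * DATA_240A_OFFSET (0x200) applies to 2.40a and later.
	 */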
3314
3315	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3316	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3317			       host->irq_flags, "dw-mci", host);
3318	if (ret)
3319		goto err_dmaunmap;
3320
3321	/*
3322	 * Enable interrupts for command done, data over, data empty, receive
3323	 * ready, and errors such as transmit/receive timeouts and CRC errors.
3324	 */
3325	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3326		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3327		   DW_MCI_ERROR_FLAGS);
3328	/* Enable mci interrupt */
3329	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
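	/*
	 * Interrupt delivery is two-level: INTMASK selects which sources may
	 * assert, while the INT_ENABLE bit in CTRL gates them all onto the
	 * IRQ line.
	 */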
3330
3331	dev_info(host->dev,
3332		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
3333		 host->irq, width, fifo_size);
3334
3335	/* We need at least one slot to succeed */
3336	ret = dw_mci_init_slot(host);
3337	if (ret) {
3338		dev_dbg(host->dev, "slot init failed\n");
3339		goto err_dmaunmap;
3340	}
3341
3342	/* Now that slots are all setup, we can enable card detect */
3343	dw_mci_enable_cd(host);
3344
3345	return 0;
3346
3347err_dmaunmap:
3348	if (host->use_dma && host->dma_ops->exit)
3349		host->dma_ops->exit(host);
3350
3351	if (!IS_ERR(host->pdata->rstc))
3352		reset_control_assert(host->pdata->rstc);
3353
3354err_clk_ciu:
3355	clk_disable_unprepare(host->ciu_clk);
3356
3357err_clk_biu:
3358	clk_disable_unprepare(host->biu_clk);
3359
3360	return ret;
3361}
3362EXPORT_SYMBOL(dw_mci_probe);
3363
3364void dw_mci_remove(struct dw_mci *host)
3365{
3366	dev_dbg(host->dev, "remove slot\n");
3367	if (host->slot)
3368		dw_mci_cleanup_slot(host->slot);
3369
3370	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3371	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3372
3373	/* disable clock to CIU */
3374	mci_writel(host, CLKENA, 0);
3375	mci_writel(host, CLKSRC, 0);
3376
3377	if (host->use_dma && host->dma_ops->exit)
3378		host->dma_ops->exit(host);
3379
3380	if (!IS_ERR(host->pdata->rstc))
3381		reset_control_assert(host->pdata->rstc);
3382
3383	clk_disable_unprepare(host->ciu_clk);
3384	clk_disable_unprepare(host->biu_clk);
3385}
3386EXPORT_SYMBOL(dw_mci_remove);
3387
3388
3389
3390#ifdef CONFIG_PM
3391int dw_mci_runtime_suspend(struct device *dev)
3392{
3393	struct dw_mci *host = dev_get_drvdata(dev);
3394
3395	if (host->use_dma && host->dma_ops->exit)
3396		host->dma_ops->exit(host);
3397
3398	clk_disable_unprepare(host->ciu_clk);
3399
3400	if (host->slot &&
3401	    (mmc_can_gpio_cd(host->slot->mmc) ||
3402	     !mmc_card_is_removable(host->slot->mmc)))
3403		clk_disable_unprepare(host->biu_clk);
3404
3405	return 0;
3406}
3407EXPORT_SYMBOL(dw_mci_runtime_suspend);
3408
3409int dw_mci_runtime_resume(struct device *dev)
3410{
3411	int ret = 0;
3412	struct dw_mci *host = dev_get_drvdata(dev);
3413
3414	if (host->slot &&
3415	    (mmc_can_gpio_cd(host->slot->mmc) ||
3416	     !mmc_card_is_removable(host->slot->mmc))) {
3417		ret = clk_prepare_enable(host->biu_clk);
3418		if (ret)
3419			return ret;
3420	}
3421
3422	ret = clk_prepare_enable(host->ciu_clk);
3423	if (ret)
3424		goto err;
3425
3426	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3427		clk_disable_unprepare(host->ciu_clk);
3428		ret = -ENODEV;
3429		goto err;
3430	}
3431
3432	if (host->use_dma && host->dma_ops->init)
3433		host->dma_ops->init(host);
3434
3435	/*
3436	 * Restore the initial value of the FIFOTH register, and
3437	 * invalidate prev_blksz by zeroing it.
3438	 */
3439	mci_writel(host, FIFOTH, host->fifoth_val);
3440	host->prev_blksz = 0;
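	/*
	 * With prev_blksz zeroed, the next request cannot match the cached
	 * block size, forcing the FIFO thresholds to be recalculated instead
	 * of reusing stale pre-suspend state.
	 */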
3441
3442	/* Put in max timeout */
3443	mci_writel(host, TMOUT, 0xFFFFFFFF);
3444
3445	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3446	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3447		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3448		   DW_MCI_ERROR_FLAGS);
3449	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3450
3451
3452	if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3453		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3454
3455	/* Force setup bus to guarantee available clock output */
3456	dw_mci_setup_bus(host->slot, true);
3457
3458	/* Re-enable SDIO interrupts. */
3459	if (sdio_irq_claimed(host->slot->mmc))
3460		__dw_mci_enable_sdio_irq(host->slot, 1);
3461
3462	/* Now that slots are all setup, we can enable card detect */
3463	dw_mci_enable_cd(host);
3464
3465	return 0;
3466
3467err:
3468	if (host->slot &&
3469	    (mmc_can_gpio_cd(host->slot->mmc) ||
3470	     !mmc_card_is_removable(host->slot->mmc)))
3471		clk_disable_unprepare(host->biu_clk);
3472
3473	return ret;
3474}
3475EXPORT_SYMBOL(dw_mci_runtime_resume);
3476#endif /* CONFIG_PM */
3477
3478static int __init dw_mci_init(void)
3479{
3480	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
3481	return 0;
3482}
3483
3484static void __exit dw_mci_exit(void)
3485{
3486}
3487
3488module_init(dw_mci_init);
3489module_exit(dw_mci_exit);
3490
3491MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3492MODULE_AUTHOR("NXP Semiconductor VietNam");
3493MODULE_AUTHOR("Imagination Technologies Ltd");
3494MODULE_LICENSE("GPL v2");