v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
   4 *
   5 *  This is a driver for the SDHC controller found in Freescale MX2/MX3
   6 *  SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
   7 *  Unlike the hardware found on MX1, this hardware just works and does
   8 *  not need all the quirks found in imxmmc.c, hence the separate driver.
   9 *
  10 *  Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  11 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
  12 *
  13 *  derived from pxamci.c by Russell King
  14 */
  15
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/ioport.h>
  19#include <linux/platform_device.h>
  20#include <linux/highmem.h>
  21#include <linux/interrupt.h>
  22#include <linux/irq.h>
  23#include <linux/blkdev.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/mmc/host.h>
  26#include <linux/mmc/card.h>
  27#include <linux/delay.h>
  28#include <linux/clk.h>
  29#include <linux/io.h>
  30#include <linux/regulator/consumer.h>
  31#include <linux/dmaengine.h>
  32#include <linux/types.h>
  33#include <linux/of.h>
  34#include <linux/of_dma.h>
  35#include <linux/mmc/slot-gpio.h>
  36
  37#include <asm/dma.h>
  38#include <asm/irq.h>
  39#include <linux/platform_data/mmc-mxcmmc.h>
  40
  41#include <linux/dma/imx-dma.h>
  42
  43#define DRIVER_NAME "mxc-mmc"
  44#define MXCMCI_TIMEOUT_MS 10000
  45
  46#define MMC_REG_STR_STP_CLK		0x00
  47#define MMC_REG_STATUS			0x04
  48#define MMC_REG_CLK_RATE		0x08
  49#define MMC_REG_CMD_DAT_CONT		0x0C
  50#define MMC_REG_RES_TO			0x10
  51#define MMC_REG_READ_TO			0x14
  52#define MMC_REG_BLK_LEN			0x18
  53#define MMC_REG_NOB			0x1C
  54#define MMC_REG_REV_NO			0x20
  55#define MMC_REG_INT_CNTR		0x24
  56#define MMC_REG_CMD			0x28
  57#define MMC_REG_ARG			0x2C
  58#define MMC_REG_RES_FIFO		0x34
  59#define MMC_REG_BUFFER_ACCESS		0x38
  60
  61#define STR_STP_CLK_RESET               (1 << 3)
  62#define STR_STP_CLK_START_CLK           (1 << 1)
  63#define STR_STP_CLK_STOP_CLK            (1 << 0)
  64
  65#define STATUS_CARD_INSERTION		(1 << 31)
  66#define STATUS_CARD_REMOVAL		(1 << 30)
  67#define STATUS_YBUF_EMPTY		(1 << 29)
  68#define STATUS_XBUF_EMPTY		(1 << 28)
  69#define STATUS_YBUF_FULL		(1 << 27)
  70#define STATUS_XBUF_FULL		(1 << 26)
  71#define STATUS_BUF_UND_RUN		(1 << 25)
  72#define STATUS_BUF_OVFL			(1 << 24)
  73#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
  74#define STATUS_END_CMD_RESP		(1 << 13)
  75#define STATUS_WRITE_OP_DONE		(1 << 12)
  76#define STATUS_DATA_TRANS_DONE		(1 << 11)
  77#define STATUS_READ_OP_DONE		(1 << 11)
  78#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
  79#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
  80#define STATUS_BUF_READ_RDY		(1 << 7)
  81#define STATUS_BUF_WRITE_RDY		(1 << 6)
  82#define STATUS_RESP_CRC_ERR		(1 << 5)
  83#define STATUS_CRC_READ_ERR		(1 << 3)
  84#define STATUS_CRC_WRITE_ERR		(1 << 2)
  85#define STATUS_TIME_OUT_RESP		(1 << 1)
  86#define STATUS_TIME_OUT_READ		(1 << 0)
  87#define STATUS_ERR_MASK			0x2f
  88
  89#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
  90#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
  91#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
  92#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
  93#define CMD_DAT_CONT_INIT		(1 << 7)
  94#define CMD_DAT_CONT_WRITE		(1 << 4)
  95#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
  96#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
  97#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
  98#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)
  99
 100#define INT_SDIO_INT_WKP_EN		(1 << 18)
 101#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
 102#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
 103#define INT_CARD_INSERTION_EN		(1 << 15)
 104#define INT_CARD_REMOVAL_EN		(1 << 14)
 105#define INT_SDIO_IRQ_EN			(1 << 13)
 106#define INT_DAT0_EN			(1 << 12)
 107#define INT_BUF_READ_EN			(1 << 4)
 108#define INT_BUF_WRITE_EN		(1 << 3)
 109#define INT_END_CMD_RES_EN		(1 << 2)
 110#define INT_WRITE_OP_DONE_EN		(1 << 1)
 111#define INT_READ_OP_EN			(1 << 0)
 112
 113enum mxcmci_type {
 114	IMX21_MMC,
 115	IMX31_MMC,
 116	MPC512X_MMC,
 117};
 118
 119struct mxcmci_host {
 120	struct mmc_host		*mmc;
 121	void __iomem		*base;
 122	dma_addr_t		phys_base;
 123	int			detect_irq;
 124	struct dma_chan		*dma;
 125	struct dma_async_tx_descriptor *desc;
 126	int			do_dma;
 127	int			default_irq_mask;
 128	int			use_sdio;
 129	unsigned int		power_mode;
 130	struct imxmmc_platform_data *pdata;
 131
 132	struct mmc_request	*req;
 133	struct mmc_command	*cmd;
 134	struct mmc_data		*data;
 135
 136	unsigned int		datasize;
 137	unsigned int		dma_dir;
 138
 139	u16			rev_no;
 140	unsigned int		cmdat;
 141
 142	struct clk		*clk_ipg;
 143	struct clk		*clk_per;
 144
 145	int			clock;
 146
 147	struct work_struct	datawork;
 148	spinlock_t		lock;
 149
 150	int			burstlen;
 151	int			dmareq;
 152	struct dma_slave_config dma_slave_config;
 153	struct imx_dma_data	dma_data;
 154
 155	struct timer_list	watchdog;
 156	enum mxcmci_type	devtype;
 157};
 158
 159static const struct of_device_id mxcmci_of_match[] = {
 160	{
 161		.compatible = "fsl,imx21-mmc",
 162		.data = (void *) IMX21_MMC,
 163	}, {
 164		.compatible = "fsl,imx31-mmc",
 165		.data = (void *) IMX31_MMC,
 166	}, {
 167		.compatible = "fsl,mpc5121-sdhc",
 168		.data = (void *) MPC512X_MMC,
 169	}, {
 170		/* sentinel */
 171	}
 172};
 173MODULE_DEVICE_TABLE(of, mxcmci_of_match);
 174
 175static inline int is_imx31_mmc(struct mxcmci_host *host)
 176{
 177	return host->devtype == IMX31_MMC;
 178}
 179
 180static inline int is_mpc512x_mmc(struct mxcmci_host *host)
 181{
 182	return host->devtype == MPC512X_MMC;
 183}
 184
 185static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
 186{
 187	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 188		return ioread32be(host->base + reg);
 189	else
 190		return readl(host->base + reg);
 191}
 192
 193static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
 194{
 195	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 196		iowrite32be(val, host->base + reg);
 197	else
 198		writel(val, host->base + reg);
 199}
 200
 201static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
 202{
 203	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 204		return ioread32be(host->base + reg);
 205	else
 206		return readw(host->base + reg);
 207}
 208
 209static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
 210{
 211	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 212		iowrite32be(val, host->base + reg);
 213	else
 214		writew(val, host->base + reg);
 215}
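/*
 * Note on the accessors above: on MPC512x the controller registers are
 * accessed as 32-bit big-endian words, which is why even the 16-bit
 * accessors use ioread32be()/iowrite32be(). The IS_ENABLED() test is a
 * compile-time constant, so i.MX builds keep the plain little-endian
 * readl()/writel()/readw()/writew() path.
 */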
 216
 217static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 218
 219static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
 220{
 221	if (!IS_ERR(host->mmc->supply.vmmc)) {
 222		if (host->power_mode == MMC_POWER_UP)
 223			mmc_regulator_set_ocr(host->mmc,
 224					      host->mmc->supply.vmmc, vdd);
 225		else if (host->power_mode == MMC_POWER_OFF)
 226			mmc_regulator_set_ocr(host->mmc,
 227					      host->mmc->supply.vmmc, 0);
 228	}
 229
 230	if (host->pdata && host->pdata->setpower)
 231		host->pdata->setpower(mmc_dev(host->mmc), vdd);
 232}
 233
 234static inline int mxcmci_use_dma(struct mxcmci_host *host)
 235{
 236	return host->do_dma;
 237}
 238
 239static void mxcmci_softreset(struct mxcmci_host *host)
 240{
 241	int i;
 242
 243	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
 244
 245	/* reset sequence */
 246	mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
 247	mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
 248			MMC_REG_STR_STP_CLK);
 249
 250	for (i = 0; i < 8; i++)
 251		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 252
 253	mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
 254}
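/*
 * The reset recipe above: assert RESET, then RESET together with
 * START_CLK, then issue eight START_CLK writes (presumably to clock the
 * reset through the controller), and finally program the maximum
 * response timeout.
 */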
 255
 256#if IS_ENABLED(CONFIG_PPC_MPC512x)
 257static inline void buffer_swap32(u32 *buf, int len)
 258{
 259	int i;
 260
 261	for (i = 0; i < ((len + 3) / 4); i++) {
 262		*buf = swab32(*buf);
 263		buf++;
 264	}
 265}
 266
 267static void mxcmci_swap_buffers(struct mmc_data *data)
 268{
 269	struct sg_mapping_iter sgm;
 270	u32 *buf;
 271
 272	sg_miter_start(&sgm, data->sg, data->sg_len,
 273		       SG_MITER_TO_SG | SG_MITER_FROM_SG);
 274
 275	while (sg_miter_next(&sgm)) {
 276		buf = sgm.addr;
 277		buffer_swap32(buf, sgm.length);
 278	}
 279
 280	sg_miter_stop(&sgm);
 281}
 282#else
 283static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
 284#endif
 285
 286static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 287{
 288	unsigned int nob = data->blocks;
 289	unsigned int blksz = data->blksz;
 290	unsigned int datasize = nob * blksz;
 291	struct scatterlist *sg;
 292	enum dma_transfer_direction slave_dirn;
 293	int i, nents;
 294
 295	host->data = data;
 296	data->bytes_xfered = 0;
 297
 298	mxcmci_writew(host, nob, MMC_REG_NOB);
 299	mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
 300	host->datasize = datasize;
 301
 302	if (!mxcmci_use_dma(host))
 303		return 0;
 304
 305	for_each_sg(data->sg, sg, data->sg_len, i) {
 306		if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {
 307			host->do_dma = 0;
 308			return 0;
 309		}
 310	}
 311
 312	if (data->flags & MMC_DATA_READ) {
 313		host->dma_dir = DMA_FROM_DEVICE;
 314		slave_dirn = DMA_DEV_TO_MEM;
 315	} else {
 316		host->dma_dir = DMA_TO_DEVICE;
 317		slave_dirn = DMA_MEM_TO_DEV;
 318
 319		mxcmci_swap_buffers(data);
 320	}
 321
 322	nents = dma_map_sg(host->dma->device->dev, data->sg,
 323				     data->sg_len,  host->dma_dir);
 324	if (nents != data->sg_len)
 325		return -EINVAL;
 326
 327	host->desc = dmaengine_prep_slave_sg(host->dma,
 328		data->sg, data->sg_len, slave_dirn,
 329		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 330
 331	if (!host->desc) {
 332		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 333				host->dma_dir);
 334		host->do_dma = 0;
 335		return 0; /* Fall back to PIO */
 336	}
 337	wmb();
 338
 339	dmaengine_submit(host->desc);
 340	dma_async_issue_pending(host->dma);
 341
 342	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
 343
 344	return 0;
 345}
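/*
 * Net effect of the checks above: DMA is only attempted when every
 * scatterlist entry is 32-bit aligned and at least 512 bytes long, and
 * if no slave descriptor can be prepared, do_dma is cleared so the
 * request silently falls back to PIO (mxcmci_transfer_data() via the
 * workqueue).
 */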
 346
 347static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
 348static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
 349
 350static void mxcmci_dma_callback(void *data)
 351{
 352	struct mxcmci_host *host = data;
 353	u32 stat;
 354
 355	del_timer(&host->watchdog);
 356
 357	stat = mxcmci_readl(host, MMC_REG_STATUS);
 358
 359	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 360
 361	mxcmci_data_done(host, stat);
 362}
 363
 364static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 365		unsigned int cmdat)
 366{
 367	u32 int_cntr = host->default_irq_mask;
 368	unsigned long flags;
 369
 370	WARN_ON(host->cmd != NULL);
 371	host->cmd = cmd;
 372
 373	switch (mmc_resp_type(cmd)) {
 374	case MMC_RSP_R1: /* short CRC, OPCODE */
 375	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
 376		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
 377		break;
 378	case MMC_RSP_R2: /* long 136 bit + CRC */
 379		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
 380		break;
 381	case MMC_RSP_R3: /* short */
 382		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
 383		break;
 384	case MMC_RSP_NONE:
 385		break;
 386	default:
 387		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
 388				mmc_resp_type(cmd));
 389		cmd->error = -EINVAL;
 390		return -EINVAL;
 391	}
 392
 393	int_cntr = INT_END_CMD_RES_EN;
 394
 395	if (mxcmci_use_dma(host)) {
 396		if (host->dma_dir == DMA_FROM_DEVICE) {
 397			host->desc->callback = mxcmci_dma_callback;
 398			host->desc->callback_param = host;
 399		} else {
 400			int_cntr |= INT_WRITE_OP_DONE_EN;
 401		}
 402	}
 403
 404	spin_lock_irqsave(&host->lock, flags);
 405	if (host->use_sdio)
 406		int_cntr |= INT_SDIO_IRQ_EN;
 407	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 408	spin_unlock_irqrestore(&host->lock, flags);
 409
 410	mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
 411	mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
 412	mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);
 413
 414	return 0;
 415}
 416
 417static void mxcmci_finish_request(struct mxcmci_host *host,
 418		struct mmc_request *req)
 419{
 420	u32 int_cntr = host->default_irq_mask;
 421	unsigned long flags;
 422
 423	spin_lock_irqsave(&host->lock, flags);
 424	if (host->use_sdio)
 425		int_cntr |= INT_SDIO_IRQ_EN;
 426	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 427	spin_unlock_irqrestore(&host->lock, flags);
 428
 429	host->req = NULL;
 430	host->cmd = NULL;
 431	host->data = NULL;
 432
 433	mmc_request_done(host->mmc, req);
 434}
 435
 436static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 437{
 438	struct mmc_data *data = host->data;
 439	int data_error;
 440
 441	if (mxcmci_use_dma(host)) {
 442		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 443				host->dma_dir);
 444		mxcmci_swap_buffers(data);
 445	}
 446
 447	if (stat & STATUS_ERR_MASK) {
 448		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
 449				stat);
 450		if (stat & STATUS_CRC_READ_ERR) {
 451			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
 452			data->error = -EILSEQ;
 453		} else if (stat & STATUS_CRC_WRITE_ERR) {
 454			u32 err_code = (stat >> 9) & 0x3;
 455			if (err_code == 2) { /* No CRC response */
 456				dev_err(mmc_dev(host->mmc),
 457					"%s: No CRC -ETIMEDOUT\n", __func__);
 458				data->error = -ETIMEDOUT;
 459			} else {
 460				dev_err(mmc_dev(host->mmc),
 461					"%s: -EILSEQ\n", __func__);
 462				data->error = -EILSEQ;
 463			}
 464		} else if (stat & STATUS_TIME_OUT_READ) {
 465			dev_err(mmc_dev(host->mmc),
 466				"%s: read -ETIMEDOUT\n", __func__);
 467			data->error = -ETIMEDOUT;
 468		} else {
 469			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
 470			data->error = -EIO;
 471		}
 472	} else {
 473		data->bytes_xfered = host->datasize;
 474	}
 475
 476	data_error = data->error;
 477
 478	host->data = NULL;
 479
 480	return data_error;
 481}
 482
 483static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
 484{
 485	struct mmc_command *cmd = host->cmd;
 486	int i;
 487	u32 a, b, c;
 488
 489	if (!cmd)
 490		return;
 491
 492	if (stat & STATUS_TIME_OUT_RESP) {
 493		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
 494		cmd->error = -ETIMEDOUT;
 495	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
 496		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
 497		cmd->error = -EILSEQ;
 498	}
 499
 500	if (cmd->flags & MMC_RSP_PRESENT) {
 501		if (cmd->flags & MMC_RSP_136) {
 502			for (i = 0; i < 4; i++) {
 503				a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 504				b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 505				cmd->resp[i] = a << 16 | b;
 506			}
 507		} else {
 508			a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 509			b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 510			c = mxcmci_readw(host, MMC_REG_RES_FIFO);
 511			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
 512		}
 513	}
 514}
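/*
 * Packing example for the short-response path above: the 48-bit response
 * is popped from RES_FIFO as three 16-bit words a, b and c; the low byte
 * of 'a' carries response bits 39:32, 'b' carries bits 31:16 and the top
 * byte of 'c' carries bits 15:8, so "a << 24 | b << 8 | c >> 8" rebuilds
 * the 32-bit payload while the command-index and CRC/end bits fall away.
 */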
 515
 516static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
 517{
 518	u32 stat;
 519	unsigned long timeout = jiffies + HZ;
 520
 521	do {
 522		stat = mxcmci_readl(host, MMC_REG_STATUS);
 523		if (stat & STATUS_ERR_MASK)
 524			return stat;
 525		if (time_after(jiffies, timeout)) {
 526			mxcmci_softreset(host);
 527			mxcmci_set_clk_rate(host, host->clock);
 528			return STATUS_TIME_OUT_READ;
 529		}
 530		if (stat & mask)
 531			return 0;
 532		cpu_relax();
 533	} while (1);
 534}
 535
 536static int mxcmci_pull(struct mxcmci_host *host, u32 *buf, int bytes)
 537{
 538	unsigned int stat;
 539
 540	while (bytes > 3) {
 541		stat = mxcmci_poll_status(host,
 542				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 543		if (stat)
 544			return stat;
 545		*buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 546		bytes -= 4;
 547	}
 548
 549	if (bytes) {
 550		u8 *b = (u8 *)buf;
 551		u32 tmp;
 552
 553		stat = mxcmci_poll_status(host,
 554				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 555		if (stat)
 556			return stat;
 557		tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 558		memcpy(b, &tmp, bytes);
 559	}
 560
 561	return 0;
 562}
 563
 564static int mxcmci_push(struct mxcmci_host *host, u32 *buf, int bytes)
 565{
 566	unsigned int stat;
 567
 568	while (bytes > 3) {
 569		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 570		if (stat)
 571			return stat;
 572		mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
 573		bytes -= 4;
 574	}
 575
 576	if (bytes) {
 577		u8 *b = (u8 *)buf;
 578		u32 tmp;
 579
 580		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 581		if (stat)
 582			return stat;
 583
 584		memcpy(&tmp, b, bytes);
 585		mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
 586	}
 587
 588	return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 589}
 590
 591static int mxcmci_transfer_data(struct mxcmci_host *host)
 592{
 593	struct mmc_data *data = host->req->data;
 594	struct sg_mapping_iter sgm;
 595	int stat;
 596	u32 *buf;
 597
 598	host->data = data;
 599	host->datasize = 0;
 600	sg_miter_start(&sgm, data->sg, data->sg_len,
 601		       (data->flags & MMC_DATA_READ) ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
 602
 603	if (data->flags & MMC_DATA_READ) {
 604		while (sg_miter_next(&sgm)) {
 605			buf = sgm.addr;
 606			stat = mxcmci_pull(host, buf, sgm.length);
 607			if (stat)
 608				goto transfer_error;
 609			host->datasize += sgm.length;
 610		}
 611	} else {
 612		while (sg_miter_next(&sgm)) {
 613			buf = sgm.addr;
 614			stat = mxcmci_push(host, buf, sgm.length);
 615			if (stat)
 616				goto transfer_error;
 617			host->datasize += sgm.length;
 618		}
 619		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
 620		if (stat)
 621			goto transfer_error;
 622	}
 623
 624transfer_error:
 625	sg_miter_stop(&sgm);
 626	return stat;
 627}
 628
 629static void mxcmci_datawork(struct work_struct *work)
 630{
 631	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
 632						  datawork);
 633	int datastat = mxcmci_transfer_data(host);
 634
 635	mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 636		MMC_REG_STATUS);
 637	mxcmci_finish_data(host, datastat);
 638
 639	if (host->req->stop) {
 640		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 641			mxcmci_finish_request(host, host->req);
 642			return;
 643		}
 644	} else {
 645		mxcmci_finish_request(host, host->req);
 646	}
 647}
 648
 649static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 650{
 651	struct mmc_request *req;
 652	int data_error;
 653	unsigned long flags;
 654
 655	spin_lock_irqsave(&host->lock, flags);
 656
 657	if (!host->data) {
 658		spin_unlock_irqrestore(&host->lock, flags);
 659		return;
 660	}
 661
 662	if (!host->req) {
 663		spin_unlock_irqrestore(&host->lock, flags);
 664		return;
 665	}
 666
 667	req = host->req;
 668	if (!req->stop)
 669		host->req = NULL; /* we will handle finish req below */
 670
 671	data_error = mxcmci_finish_data(host, stat);
 672
 673	spin_unlock_irqrestore(&host->lock, flags);
 674
 675	if (data_error)
 676		return;
 677
 678	mxcmci_read_response(host, stat);
 679	host->cmd = NULL;
 680
 681	if (req->stop) {
 682		if (mxcmci_start_cmd(host, req->stop, 0)) {
 683			mxcmci_finish_request(host, req);
 684			return;
 685		}
 686	} else {
 687		mxcmci_finish_request(host, req);
 688	}
 689}
 690
 691static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 692{
 693	mxcmci_read_response(host, stat);
 694	host->cmd = NULL;
 695
 696	if (!host->data && host->req) {
 697		mxcmci_finish_request(host, host->req);
 698		return;
 699	}
 700
 701	/* For the DMA case the DMA engine handles the data transfer
 702	 * automatically. For non DMA we have to do it ourselves.
 703	 * Don't do it in interrupt context though.
 704	 */
 705	if (!mxcmci_use_dma(host) && host->data)
 706		schedule_work(&host->datawork);
 707
 708}
 709
 710static irqreturn_t mxcmci_irq(int irq, void *devid)
 711{
 712	struct mxcmci_host *host = devid;
 713	bool sdio_irq;
 714	u32 stat;
 715
 716	stat = mxcmci_readl(host, MMC_REG_STATUS);
 717	mxcmci_writel(host,
 718		stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
 719			 STATUS_WRITE_OP_DONE),
 720		MMC_REG_STATUS);
 721
 722	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 723
 724	spin_lock(&host->lock);
 725	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
 726	spin_unlock(&host->lock);
 727
 728	if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
 729		mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);
 730
 731	if (sdio_irq) {
 732		mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
 733		mmc_signal_sdio_irq(host->mmc);
 734	}
 735
 736	if (stat & STATUS_END_CMD_RESP)
 737		mxcmci_cmd_done(host, stat);
 738
 739	if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
 740		del_timer(&host->watchdog);
 741		mxcmci_data_done(host, stat);
 742	}
 743
 744	if (host->default_irq_mask &&
 745		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
 746		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 747
 748	return IRQ_HANDLED;
 749}
 750
 751static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 752{
 753	struct mxcmci_host *host = mmc_priv(mmc);
 754	unsigned int cmdat = host->cmdat;
 755	int error;
 756
 757	WARN_ON(host->req != NULL);
 758
 759	host->req = req;
 760	host->cmdat &= ~CMD_DAT_CONT_INIT;
 761
 762	if (host->dma)
 763		host->do_dma = 1;
 764
 765	if (req->data) {
 766		error = mxcmci_setup_data(host, req->data);
 767		if (error) {
 768			req->cmd->error = error;
 769			goto out;
 770		}
 771
 772
 773		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
 774
 775		if (req->data->flags & MMC_DATA_WRITE)
 776			cmdat |= CMD_DAT_CONT_WRITE;
 777	}
 778
 779	error = mxcmci_start_cmd(host, req->cmd, cmdat);
 780
 781out:
 782	if (error)
 783		mxcmci_finish_request(host, req);
 784}
 785
 786static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
 787{
 788	unsigned int divider;
 789	int prescaler = 0;
 790	unsigned int clk_in = clk_get_rate(host->clk_per);
 791
 792	while (prescaler <= 0x800) {
 793		for (divider = 1; divider <= 0xF; divider++) {
 794			int x;
 795
 796			x = (clk_in / (divider + 1));
 797
 798			if (prescaler)
 799				x /= (prescaler * 2);
 800
 801			if (x <= clk_ios)
 802				break;
 803		}
 804		if (divider < 0x10)
 805			break;
 806
 807		if (prescaler == 0)
 808			prescaler = 1;
 809		else
 810			prescaler <<= 1;
 811	}
 812
 813	mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);
 814
 815	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
 816			prescaler, divider, clk_in, clk_ios);
 817}
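/*
 * Example with made-up numbers, following this routine's
 * divide-by-(divider + 1) model: for a 66 MHz per-clock and a requested
 * 25 MHz card clock, the prescaler stays 0 and the divider settles at 2
 * (66 MHz / 3 = 22 MHz <= 25 MHz), so 0x02 is written to
 * MMC_REG_CLK_RATE; the prescaler is only doubled once the divider would
 * exceed 0xF.
 */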
 818
 819static int mxcmci_setup_dma(struct mmc_host *mmc)
 820{
 821	struct mxcmci_host *host = mmc_priv(mmc);
 822	struct dma_slave_config *config = &host->dma_slave_config;
 823
 824	config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 825	config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 826	config->dst_addr_width = 4;
 827	config->src_addr_width = 4;
 828	config->dst_maxburst = host->burstlen;
 829	config->src_maxburst = host->burstlen;
 830	config->device_fc = false;
 831
 832	return dmaengine_slave_config(host->dma, config);
 833}
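/*
 * Both src_addr and dst_addr point at the BUFFER_ACCESS FIFO because a
 * single bidirectional channel (requested as "rx-tx" in the devicetree
 * case) handles reads and writes alike; only the direction passed to
 * dmaengine_prep_slave_sg() differs per transfer.
 */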
 834
 835static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 836{
 837	struct mxcmci_host *host = mmc_priv(mmc);
 838	int burstlen, ret;
 839
 840	/*
 841	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
 842	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
 843	 */
 844	if (ios->bus_width == MMC_BUS_WIDTH_4)
 845		burstlen = 16;
 846	else
 847		burstlen = 4;
 848
 849	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
 850		host->burstlen = burstlen;
 851		ret = mxcmci_setup_dma(mmc);
 852		if (ret) {
 853			dev_err(mmc_dev(host->mmc),
 854				"failed to config DMA channel. Falling back to PIO\n");
 855			dma_release_channel(host->dma);
 856			host->do_dma = 0;
 857			host->dma = NULL;
 858		}
 859	}
 860
 861	if (ios->bus_width == MMC_BUS_WIDTH_4)
 862		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
 863	else
 864		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 865
 866	if (host->power_mode != ios->power_mode) {
 867		host->power_mode = ios->power_mode;
 868		mxcmci_set_power(host, ios->vdd);
 869
 870		if (ios->power_mode == MMC_POWER_ON)
 871			host->cmdat |= CMD_DAT_CONT_INIT;
 872	}
 873
 874	if (ios->clock) {
 875		mxcmci_set_clk_rate(host, ios->clock);
 876		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 877	} else {
 878		mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
 879	}
 880
 881	host->clock = ios->clock;
 882}
 883
 884static irqreturn_t mxcmci_detect_irq(int irq, void *data)
 885{
 886	struct mmc_host *mmc = data;
 887
 888	dev_dbg(mmc_dev(mmc), "%s\n", __func__);
 889
 890	mmc_detect_change(mmc, msecs_to_jiffies(250));
 891	return IRQ_HANDLED;
 892}
 893
 894static int mxcmci_get_ro(struct mmc_host *mmc)
 895{
 896	struct mxcmci_host *host = mmc_priv(mmc);
 897
 898	if (host->pdata && host->pdata->get_ro)
 899		return !!host->pdata->get_ro(mmc_dev(mmc));
 900	/*
 901	 * If board doesn't support read only detection (no mmc_gpio
 902	 * context or gpio is invalid), then let the mmc core decide
 903	 * what to do.
 904	 */
 905	return mmc_gpio_get_ro(mmc);
 906}
 907
 908static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 909{
 910	struct mxcmci_host *host = mmc_priv(mmc);
 911	unsigned long flags;
 912	u32 int_cntr;
 913
 914	spin_lock_irqsave(&host->lock, flags);
 915	host->use_sdio = enable;
 916	int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);
 917
 918	if (enable)
 919		int_cntr |= INT_SDIO_IRQ_EN;
 920	else
 921		int_cntr &= ~INT_SDIO_IRQ_EN;
 922
 923	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 924	spin_unlock_irqrestore(&host->lock, flags);
 925}
 926
 927static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
 928{
 929	struct mxcmci_host *mxcmci = mmc_priv(host);
 930
 931	/*
 932	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
  933 * multi-block transfers when the connected SDIO peripheral doesn't
 934	 * drive the BUSY line as required by the specs.
 935	 * One way to prevent this is to only allow 1-bit transfers.
 936	 */
 937
 938	if (is_imx31_mmc(mxcmci) && mmc_card_sdio(card))
 939		host->caps &= ~MMC_CAP_4_BIT_DATA;
 940	else
 941		host->caps |= MMC_CAP_4_BIT_DATA;
 942}
 943
 944static bool filter(struct dma_chan *chan, void *param)
 945{
 946	struct mxcmci_host *host = param;
 947
 948	if (!imx_dma_is_general_purpose(chan))
 949		return false;
 950
 951	chan->private = &host->dma_data;
 952
 953	return true;
 954}
 955
 956static void mxcmci_watchdog(struct timer_list *t)
 957{
 958	struct mxcmci_host *host = from_timer(host, t, watchdog);
 959	struct mmc_request *req = host->req;
 960	unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
 961
 962	if (host->dma_dir == DMA_FROM_DEVICE) {
 963		dmaengine_terminate_all(host->dma);
 964		dev_err(mmc_dev(host->mmc),
 965			"%s: read time out (status = 0x%08x)\n",
 966			__func__, stat);
 967	} else {
 968		dev_err(mmc_dev(host->mmc),
 969			"%s: write time out (status = 0x%08x)\n",
 970			__func__, stat);
 971		mxcmci_softreset(host);
 972	}
 973
  974	/* Mark transfer as erroneous and inform the upper layers */
 975
 976	if (host->data)
 977		host->data->error = -ETIMEDOUT;
 978	host->req = NULL;
 979	host->cmd = NULL;
 980	host->data = NULL;
 981	mmc_request_done(host->mmc, req);
 982}
 983
 984static const struct mmc_host_ops mxcmci_ops = {
 985	.request		= mxcmci_request,
 986	.set_ios		= mxcmci_set_ios,
 987	.get_ro			= mxcmci_get_ro,
 988	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
 989	.init_card		= mxcmci_init_card,
 990};
 991
 992static int mxcmci_probe(struct platform_device *pdev)
 993{
 994	struct mmc_host *mmc;
 995	struct mxcmci_host *host;
 996	struct resource *res;
 997	int ret = 0, irq;
 998	bool dat3_card_detect = false;
 999	dma_cap_mask_t mask;
1000	struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
1001
1002	pr_info("i.MX/MPC512x SDHC driver\n");
1003
1004	irq = platform_get_irq(pdev, 0);
1005	if (irq < 0)
1006		return irq;
1007
1008	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1009	if (!mmc)
1010		return -ENOMEM;
1011
1012	host = mmc_priv(mmc);
1013
1014	host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1015	if (IS_ERR(host->base)) {
1016		ret = PTR_ERR(host->base);
1017		goto out_free;
1018	}
1019
1020	host->phys_base = res->start;
1021
1022	ret = mmc_of_parse(mmc);
1023	if (ret)
1024		goto out_free;
1025	mmc->ops = &mxcmci_ops;
1026
1027	/* For devicetree parsing, the bus width is read from devicetree */
1028	if (pdata)
1029		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1030	else
1031		mmc->caps |= MMC_CAP_SDIO_IRQ;
1032
1033	/* MMC core transfer sizes tunable parameters */
1034	mmc->max_blk_size = 2048;
1035	mmc->max_blk_count = 65535;
1036	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1037	mmc->max_seg_size = mmc->max_req_size;
1038
1039	host->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
1040
1041	/* adjust max_segs after devtype detection */
1042	if (!is_mpc512x_mmc(host))
1043		mmc->max_segs = 64;
1044
1045	host->mmc = mmc;
1046	host->pdata = pdata;
1047	spin_lock_init(&host->lock);
1048
1049	if (pdata)
1050		dat3_card_detect = pdata->dat3_card_detect;
1051	else if (mmc_card_is_removable(mmc)
1052			&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
1053		dat3_card_detect = true;
1054
1055	ret = mmc_regulator_get_supply(mmc);
1056	if (ret)
1057		goto out_free;
1058
1059	if (!mmc->ocr_avail) {
1060		if (pdata && pdata->ocr_avail)
1061			mmc->ocr_avail = pdata->ocr_avail;
1062		else
1063			mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1064	}
1065
1066	if (dat3_card_detect)
1067		host->default_irq_mask =
1068			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
1069	else
1070		host->default_irq_mask = 0;
1071
1072	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1073	if (IS_ERR(host->clk_ipg)) {
1074		ret = PTR_ERR(host->clk_ipg);
1075		goto out_free;
1076	}
1077
1078	host->clk_per = devm_clk_get(&pdev->dev, "per");
1079	if (IS_ERR(host->clk_per)) {
1080		ret = PTR_ERR(host->clk_per);
1081		goto out_free;
1082	}
1083
1084	ret = clk_prepare_enable(host->clk_per);
1085	if (ret)
1086		goto out_free;
1087
1088	ret = clk_prepare_enable(host->clk_ipg);
1089	if (ret)
1090		goto out_clk_per_put;
1091
1092	mxcmci_softreset(host);
1093
1094	host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
1095	if (host->rev_no != 0x400) {
1096		ret = -ENODEV;
1097		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1098			host->rev_no);
1099		goto out_clk_put;
1100	}
1101
1102	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
1103	mmc->f_max = clk_get_rate(host->clk_per) >> 1;
1104
1105	/* recommended in data sheet */
1106	mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);
1107
1108	mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
1109
1110	if (!host->pdata) {
1111		host->dma = dma_request_chan(&pdev->dev, "rx-tx");
1112		if (IS_ERR(host->dma)) {
1113			if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
1114				ret = -EPROBE_DEFER;
1115				goto out_clk_put;
1116			}
1117
1118			/* Ignore errors to fall back to PIO mode */
1119			host->dma = NULL;
1120		}
1121	} else {
1122		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1123		if (res) {
1124			host->dmareq = res->start;
1125			host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
1126			host->dma_data.priority = DMA_PRIO_LOW;
1127			host->dma_data.dma_request = host->dmareq;
1128			dma_cap_zero(mask);
1129			dma_cap_set(DMA_SLAVE, mask);
1130			host->dma = dma_request_channel(mask, filter, host);
1131		}
1132	}
1133	if (host->dma)
1134		mmc->max_seg_size = dma_get_max_seg_size(
1135				host->dma->device->dev);
1136	else
1137		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
1138
1139	INIT_WORK(&host->datawork, mxcmci_datawork);
1140
1141	ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0,
1142			       dev_name(&pdev->dev), host);
1143	if (ret)
1144		goto out_free_dma;
1145
1146	platform_set_drvdata(pdev, mmc);
1147
1148	if (host->pdata && host->pdata->init) {
1149		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
1150				host->mmc);
1151		if (ret)
1152			goto out_free_dma;
1153	}
1154
1155	timer_setup(&host->watchdog, mxcmci_watchdog, 0);
1156
1157	ret = mmc_add_host(mmc);
1158	if (ret)
1159		goto out_free_dma;
1160
1161	return 0;
1162
1163out_free_dma:
1164	if (host->dma)
1165		dma_release_channel(host->dma);
1166
1167out_clk_put:
1168	clk_disable_unprepare(host->clk_ipg);
1169out_clk_per_put:
1170	clk_disable_unprepare(host->clk_per);
1171
1172out_free:
1173	mmc_free_host(mmc);
1174
1175	return ret;
1176}
1177
1178static void mxcmci_remove(struct platform_device *pdev)
1179{
1180	struct mmc_host *mmc = platform_get_drvdata(pdev);
1181	struct mxcmci_host *host = mmc_priv(mmc);
1182
1183	mmc_remove_host(mmc);
1184
1185	if (host->pdata && host->pdata->exit)
1186		host->pdata->exit(&pdev->dev, mmc);
1187
1188	if (host->dma)
1189		dma_release_channel(host->dma);
1190
1191	clk_disable_unprepare(host->clk_per);
1192	clk_disable_unprepare(host->clk_ipg);
1193
1194	mmc_free_host(mmc);
1195}
1196
1197static int mxcmci_suspend(struct device *dev)
1198{
1199	struct mmc_host *mmc = dev_get_drvdata(dev);
1200	struct mxcmci_host *host = mmc_priv(mmc);
1201
1202	clk_disable_unprepare(host->clk_per);
1203	clk_disable_unprepare(host->clk_ipg);
1204	return 0;
1205}
1206
1207static int mxcmci_resume(struct device *dev)
1208{
1209	struct mmc_host *mmc = dev_get_drvdata(dev);
1210	struct mxcmci_host *host = mmc_priv(mmc);
1211	int ret;
1212
1213	ret = clk_prepare_enable(host->clk_per);
1214	if (ret)
1215		return ret;
1216
1217	ret = clk_prepare_enable(host->clk_ipg);
1218	if (ret)
1219		clk_disable_unprepare(host->clk_per);
1220
1221	return ret;
1222}
1223
1224static DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
1225
1226static struct platform_driver mxcmci_driver = {
1227	.probe		= mxcmci_probe,
1228	.remove		= mxcmci_remove,
1229	.driver		= {
1230		.name		= DRIVER_NAME,
1231		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
1232		.pm	= pm_sleep_ptr(&mxcmci_pm_ops),
1233		.of_match_table	= mxcmci_of_match,
1234	}
1235};
1236
1237module_platform_driver(mxcmci_driver);
1238
1239MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1240MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1241MODULE_LICENSE("GPL");
1242MODULE_ALIAS("platform:mxc-mmc");
v4.6
 
   1/*
   2 *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
   3 *
   4 *  This is a driver for the SDHC controller found in Freescale MX2/MX3
   5 *  SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
   6 *  Unlike the hardware found on MX1, this hardware just works and does
   7 *  not need all the quirks found in imxmmc.c, hence the separate driver.
   8 *
   9 *  Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  10 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
  11 *
  12 *  derived from pxamci.c by Russell King
  13 *
  14 * This program is free software; you can redistribute it and/or modify
  15 * it under the terms of the GNU General Public License version 2 as
  16 * published by the Free Software Foundation.
  17 *
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/init.h>
  22#include <linux/ioport.h>
  23#include <linux/platform_device.h>
 
  24#include <linux/interrupt.h>
  25#include <linux/irq.h>
  26#include <linux/blkdev.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/mmc/host.h>
  29#include <linux/mmc/card.h>
  30#include <linux/delay.h>
  31#include <linux/clk.h>
  32#include <linux/io.h>
  33#include <linux/gpio.h>
  34#include <linux/regulator/consumer.h>
  35#include <linux/dmaengine.h>
  36#include <linux/types.h>
  37#include <linux/of.h>
  38#include <linux/of_device.h>
  39#include <linux/of_dma.h>
  40#include <linux/of_gpio.h>
  41#include <linux/mmc/slot-gpio.h>
  42
  43#include <asm/dma.h>
  44#include <asm/irq.h>
  45#include <linux/platform_data/mmc-mxcmmc.h>
  46
  47#include <linux/platform_data/dma-imx.h>
  48
  49#define DRIVER_NAME "mxc-mmc"
  50#define MXCMCI_TIMEOUT_MS 10000
  51
  52#define MMC_REG_STR_STP_CLK		0x00
  53#define MMC_REG_STATUS			0x04
  54#define MMC_REG_CLK_RATE		0x08
  55#define MMC_REG_CMD_DAT_CONT		0x0C
  56#define MMC_REG_RES_TO			0x10
  57#define MMC_REG_READ_TO			0x14
  58#define MMC_REG_BLK_LEN			0x18
  59#define MMC_REG_NOB			0x1C
  60#define MMC_REG_REV_NO			0x20
  61#define MMC_REG_INT_CNTR		0x24
  62#define MMC_REG_CMD			0x28
  63#define MMC_REG_ARG			0x2C
  64#define MMC_REG_RES_FIFO		0x34
  65#define MMC_REG_BUFFER_ACCESS		0x38
  66
  67#define STR_STP_CLK_RESET               (1 << 3)
  68#define STR_STP_CLK_START_CLK           (1 << 1)
  69#define STR_STP_CLK_STOP_CLK            (1 << 0)
  70
  71#define STATUS_CARD_INSERTION		(1 << 31)
  72#define STATUS_CARD_REMOVAL		(1 << 30)
  73#define STATUS_YBUF_EMPTY		(1 << 29)
  74#define STATUS_XBUF_EMPTY		(1 << 28)
  75#define STATUS_YBUF_FULL		(1 << 27)
  76#define STATUS_XBUF_FULL		(1 << 26)
  77#define STATUS_BUF_UND_RUN		(1 << 25)
  78#define STATUS_BUF_OVFL			(1 << 24)
  79#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
  80#define STATUS_END_CMD_RESP		(1 << 13)
  81#define STATUS_WRITE_OP_DONE		(1 << 12)
  82#define STATUS_DATA_TRANS_DONE		(1 << 11)
  83#define STATUS_READ_OP_DONE		(1 << 11)
  84#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
  85#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
  86#define STATUS_BUF_READ_RDY		(1 << 7)
  87#define STATUS_BUF_WRITE_RDY		(1 << 6)
  88#define STATUS_RESP_CRC_ERR		(1 << 5)
  89#define STATUS_CRC_READ_ERR		(1 << 3)
  90#define STATUS_CRC_WRITE_ERR		(1 << 2)
  91#define STATUS_TIME_OUT_RESP		(1 << 1)
  92#define STATUS_TIME_OUT_READ		(1 << 0)
  93#define STATUS_ERR_MASK			0x2f
  94
  95#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
  96#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
  97#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
  98#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
  99#define CMD_DAT_CONT_INIT		(1 << 7)
 100#define CMD_DAT_CONT_WRITE		(1 << 4)
 101#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
 102#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
 103#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
 104#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)
 105
 106#define INT_SDIO_INT_WKP_EN		(1 << 18)
 107#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
 108#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
 109#define INT_CARD_INSERTION_EN		(1 << 15)
 110#define INT_CARD_REMOVAL_EN		(1 << 14)
 111#define INT_SDIO_IRQ_EN			(1 << 13)
 112#define INT_DAT0_EN			(1 << 12)
 113#define INT_BUF_READ_EN			(1 << 4)
 114#define INT_BUF_WRITE_EN		(1 << 3)
 115#define INT_END_CMD_RES_EN		(1 << 2)
 116#define INT_WRITE_OP_DONE_EN		(1 << 1)
 117#define INT_READ_OP_EN			(1 << 0)
 118
 119enum mxcmci_type {
 120	IMX21_MMC,
 121	IMX31_MMC,
 122	MPC512X_MMC,
 123};
 124
 125struct mxcmci_host {
 126	struct mmc_host		*mmc;
 127	void __iomem		*base;
 128	dma_addr_t		phys_base;
 129	int			detect_irq;
 130	struct dma_chan		*dma;
 131	struct dma_async_tx_descriptor *desc;
 132	int			do_dma;
 133	int			default_irq_mask;
 134	int			use_sdio;
 135	unsigned int		power_mode;
 136	struct imxmmc_platform_data *pdata;
 137
 138	struct mmc_request	*req;
 139	struct mmc_command	*cmd;
 140	struct mmc_data		*data;
 141
 142	unsigned int		datasize;
 143	unsigned int		dma_dir;
 144
 145	u16			rev_no;
 146	unsigned int		cmdat;
 147
 148	struct clk		*clk_ipg;
 149	struct clk		*clk_per;
 150
 151	int			clock;
 152
 153	struct work_struct	datawork;
 154	spinlock_t		lock;
 155
 156	int			burstlen;
 157	int			dmareq;
 158	struct dma_slave_config dma_slave_config;
 159	struct imx_dma_data	dma_data;
 160
 161	struct timer_list	watchdog;
 162	enum mxcmci_type	devtype;
 163};
 164
 165static const struct platform_device_id mxcmci_devtype[] = {
 166	{
 167		.name = "imx21-mmc",
 168		.driver_data = IMX21_MMC,
 169	}, {
 170		.name = "imx31-mmc",
 171		.driver_data = IMX31_MMC,
 172	}, {
 173		.name = "mpc512x-sdhc",
 174		.driver_data = MPC512X_MMC,
 175	}, {
 176		/* sentinel */
 177	}
 178};
 179MODULE_DEVICE_TABLE(platform, mxcmci_devtype);
 180
 181static const struct of_device_id mxcmci_of_match[] = {
 182	{
 183		.compatible = "fsl,imx21-mmc",
 184		.data = &mxcmci_devtype[IMX21_MMC],
 185	}, {
 186		.compatible = "fsl,imx31-mmc",
 187		.data = &mxcmci_devtype[IMX31_MMC],
 188	}, {
 189		.compatible = "fsl,mpc5121-sdhc",
 190		.data = &mxcmci_devtype[MPC512X_MMC],
 191	}, {
 192		/* sentinel */
 193	}
 194};
 195MODULE_DEVICE_TABLE(of, mxcmci_of_match);
 196
 197static inline int is_imx31_mmc(struct mxcmci_host *host)
 198{
 199	return host->devtype == IMX31_MMC;
 200}
 201
 202static inline int is_mpc512x_mmc(struct mxcmci_host *host)
 203{
 204	return host->devtype == MPC512X_MMC;
 205}
 206
 207static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
 208{
 209	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 210		return ioread32be(host->base + reg);
 211	else
 212		return readl(host->base + reg);
 213}
 214
 215static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
 216{
 217	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 218		iowrite32be(val, host->base + reg);
 219	else
 220		writel(val, host->base + reg);
 221}
 222
 223static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
 224{
 225	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 226		return ioread32be(host->base + reg);
 227	else
 228		return readw(host->base + reg);
 229}
 230
 231static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
 232{
 233	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 234		iowrite32be(val, host->base + reg);
 235	else
 236		writew(val, host->base + reg);
 237}
 238
 239static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 240
 241static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
 242{
 243	if (!IS_ERR(host->mmc->supply.vmmc)) {
 244		if (host->power_mode == MMC_POWER_UP)
 245			mmc_regulator_set_ocr(host->mmc,
 246					      host->mmc->supply.vmmc, vdd);
 247		else if (host->power_mode == MMC_POWER_OFF)
 248			mmc_regulator_set_ocr(host->mmc,
 249					      host->mmc->supply.vmmc, 0);
 250	}
 251
 252	if (host->pdata && host->pdata->setpower)
 253		host->pdata->setpower(mmc_dev(host->mmc), vdd);
 254}
 255
 256static inline int mxcmci_use_dma(struct mxcmci_host *host)
 257{
 258	return host->do_dma;
 259}
 260
 261static void mxcmci_softreset(struct mxcmci_host *host)
 262{
 263	int i;
 264
 265	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
 266
 267	/* reset sequence */
 268	mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
 269	mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
 270			MMC_REG_STR_STP_CLK);
 271
 272	for (i = 0; i < 8; i++)
 273		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 274
 275	mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
 276}
 277
 278#if IS_ENABLED(CONFIG_PPC_MPC512x)
 279static inline void buffer_swap32(u32 *buf, int len)
 280{
 281	int i;
 282
 283	for (i = 0; i < ((len + 3) / 4); i++) {
 284		*buf = swab32(*buf);
 285		buf++;
 286	}
 287}
 288
 289static void mxcmci_swap_buffers(struct mmc_data *data)
 290{
 291	struct scatterlist *sg;
 292	int i;
 
 
 
 
 
 
 
 
 293
 294	for_each_sg(data->sg, sg, data->sg_len, i)
 295		buffer_swap32(sg_virt(sg), sg->length);
 296}
 297#else
 298static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
 299#endif
 300
 301static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 302{
 303	unsigned int nob = data->blocks;
 304	unsigned int blksz = data->blksz;
 305	unsigned int datasize = nob * blksz;
 306	struct scatterlist *sg;
 307	enum dma_transfer_direction slave_dirn;
 308	int i, nents;
 309
 310	host->data = data;
 311	data->bytes_xfered = 0;
 312
 313	mxcmci_writew(host, nob, MMC_REG_NOB);
 314	mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
 315	host->datasize = datasize;
 316
 317	if (!mxcmci_use_dma(host))
 318		return 0;
 319
 320	for_each_sg(data->sg, sg, data->sg_len, i) {
 321		if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {
 322			host->do_dma = 0;
 323			return 0;
 324		}
 325	}
 326
 327	if (data->flags & MMC_DATA_READ) {
 328		host->dma_dir = DMA_FROM_DEVICE;
 329		slave_dirn = DMA_DEV_TO_MEM;
 330	} else {
 331		host->dma_dir = DMA_TO_DEVICE;
 332		slave_dirn = DMA_MEM_TO_DEV;
 333
 334		mxcmci_swap_buffers(data);
 335	}
 336
 337	nents = dma_map_sg(host->dma->device->dev, data->sg,
 338				     data->sg_len,  host->dma_dir);
 339	if (nents != data->sg_len)
 340		return -EINVAL;
 341
 342	host->desc = dmaengine_prep_slave_sg(host->dma,
 343		data->sg, data->sg_len, slave_dirn,
 344		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 345
 346	if (!host->desc) {
 347		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 348				host->dma_dir);
 349		host->do_dma = 0;
 350		return 0; /* Fall back to PIO */
 351	}
 352	wmb();
 353
 354	dmaengine_submit(host->desc);
 355	dma_async_issue_pending(host->dma);
 356
 357	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
 358
 359	return 0;
 360}
 361
 362static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
 363static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
 364
 365static void mxcmci_dma_callback(void *data)
 366{
 367	struct mxcmci_host *host = data;
 368	u32 stat;
 369
 370	del_timer(&host->watchdog);
 371
 372	stat = mxcmci_readl(host, MMC_REG_STATUS);
 373
 374	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 375
 376	mxcmci_data_done(host, stat);
 377}
 378
 379static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 380		unsigned int cmdat)
 381{
 382	u32 int_cntr = host->default_irq_mask;
 383	unsigned long flags;
 384
 385	WARN_ON(host->cmd != NULL);
 386	host->cmd = cmd;
 387
 388	switch (mmc_resp_type(cmd)) {
 389	case MMC_RSP_R1: /* short CRC, OPCODE */
 390	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
 391		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
 392		break;
 393	case MMC_RSP_R2: /* long 136 bit + CRC */
 394		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
 395		break;
 396	case MMC_RSP_R3: /* short */
 397		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
 398		break;
 399	case MMC_RSP_NONE:
 400		break;
 401	default:
 402		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
 403				mmc_resp_type(cmd));
 404		cmd->error = -EINVAL;
 405		return -EINVAL;
 406	}
 407
 408	int_cntr = INT_END_CMD_RES_EN;
 409
 410	if (mxcmci_use_dma(host)) {
 411		if (host->dma_dir == DMA_FROM_DEVICE) {
 412			host->desc->callback = mxcmci_dma_callback;
 413			host->desc->callback_param = host;
 414		} else {
 415			int_cntr |= INT_WRITE_OP_DONE_EN;
 416		}
 417	}
 418
 419	spin_lock_irqsave(&host->lock, flags);
 420	if (host->use_sdio)
 421		int_cntr |= INT_SDIO_IRQ_EN;
 422	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 423	spin_unlock_irqrestore(&host->lock, flags);
 424
 425	mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
 426	mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
 427	mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);
 428
 429	return 0;
 430}
 431
 432static void mxcmci_finish_request(struct mxcmci_host *host,
 433		struct mmc_request *req)
 434{
 435	u32 int_cntr = host->default_irq_mask;
 436	unsigned long flags;
 437
 438	spin_lock_irqsave(&host->lock, flags);
 439	if (host->use_sdio)
 440		int_cntr |= INT_SDIO_IRQ_EN;
 441	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 442	spin_unlock_irqrestore(&host->lock, flags);
 443
 444	host->req = NULL;
 445	host->cmd = NULL;
 446	host->data = NULL;
 447
 448	mmc_request_done(host->mmc, req);
 449}
 450
 451static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 452{
 453	struct mmc_data *data = host->data;
 454	int data_error;
 455
 456	if (mxcmci_use_dma(host)) {
 457		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 458				host->dma_dir);
 459		mxcmci_swap_buffers(data);
 460	}
 461
 462	if (stat & STATUS_ERR_MASK) {
 463		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
 464				stat);
 465		if (stat & STATUS_CRC_READ_ERR) {
 466			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
 467			data->error = -EILSEQ;
 468		} else if (stat & STATUS_CRC_WRITE_ERR) {
 469			u32 err_code = (stat >> 9) & 0x3;
 470			if (err_code == 2) { /* No CRC response */
 471				dev_err(mmc_dev(host->mmc),
 472					"%s: No CRC -ETIMEDOUT\n", __func__);
 473				data->error = -ETIMEDOUT;
 474			} else {
 475				dev_err(mmc_dev(host->mmc),
 476					"%s: -EILSEQ\n", __func__);
 477				data->error = -EILSEQ;
 478			}
 479		} else if (stat & STATUS_TIME_OUT_READ) {
 480			dev_err(mmc_dev(host->mmc),
 481				"%s: read -ETIMEDOUT\n", __func__);
 482			data->error = -ETIMEDOUT;
 483		} else {
 484			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
 485			data->error = -EIO;
 486		}
 487	} else {
 488		data->bytes_xfered = host->datasize;
 489	}
 490
 491	data_error = data->error;
 492
 493	host->data = NULL;
 494
 495	return data_error;
 496}
 497
 498static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
 499{
 500	struct mmc_command *cmd = host->cmd;
 501	int i;
 502	u32 a, b, c;
 503
 504	if (!cmd)
 505		return;
 506
 507	if (stat & STATUS_TIME_OUT_RESP) {
 508		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
 509		cmd->error = -ETIMEDOUT;
 510	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
 511		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
 512		cmd->error = -EILSEQ;
 513	}
 514
 515	if (cmd->flags & MMC_RSP_PRESENT) {
 516		if (cmd->flags & MMC_RSP_136) {
 517			for (i = 0; i < 4; i++) {
 518				a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 519				b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 520				cmd->resp[i] = a << 16 | b;
 521			}
 522		} else {
 523			a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 524			b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 525			c = mxcmci_readw(host, MMC_REG_RES_FIFO);
 526			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
 527		}
 528	}
 529}
 530
 531static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
 532{
 533	u32 stat;
 534	unsigned long timeout = jiffies + HZ;
 535
 536	do {
 537		stat = mxcmci_readl(host, MMC_REG_STATUS);
 538		if (stat & STATUS_ERR_MASK)
 539			return stat;
 540		if (time_after(jiffies, timeout)) {
 541			mxcmci_softreset(host);
 542			mxcmci_set_clk_rate(host, host->clock);
 543			return STATUS_TIME_OUT_READ;
 544		}
 545		if (stat & mask)
 546			return 0;
 547		cpu_relax();
 548	} while (1);
 549}
 550
 551static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
 552{
 553	unsigned int stat;
 554	u32 *buf = _buf;
 555
 556	while (bytes > 3) {
 557		stat = mxcmci_poll_status(host,
 558				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 559		if (stat)
 560			return stat;
 561		*buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 562		bytes -= 4;
 563	}
 564
 565	if (bytes) {
 566		u8 *b = (u8 *)buf;
 567		u32 tmp;
 568
 569		stat = mxcmci_poll_status(host,
 570				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 571		if (stat)
 572			return stat;
 573		tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 574		memcpy(b, &tmp, bytes);
 575	}
 576
 577	return 0;
 578}
 579
 580static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
 581{
 582	unsigned int stat;
 583	u32 *buf = _buf;
 584
 585	while (bytes > 3) {
 586		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 587		if (stat)
 588			return stat;
 589		mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
 590		bytes -= 4;
 591	}
 592
 593	if (bytes) {
 594		u8 *b = (u8 *)buf;
 595		u32 tmp;
 596
 597		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 598		if (stat)
 599			return stat;
 600
 601		memcpy(&tmp, b, bytes);
 602		mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
 603	}
 604
 605	return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 606}
 607
 608static int mxcmci_transfer_data(struct mxcmci_host *host)
 609{
 610	struct mmc_data *data = host->req->data;
 611	struct scatterlist *sg;
 612	int stat, i;
 
 613
 614	host->data = data;
 615	host->datasize = 0;
 
 
 616
 617	if (data->flags & MMC_DATA_READ) {
 618		for_each_sg(data->sg, sg, data->sg_len, i) {
 619			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
 
 620			if (stat)
 621				return stat;
 622			host->datasize += sg->length;
 623		}
 624	} else {
 625		for_each_sg(data->sg, sg, data->sg_len, i) {
 626			stat = mxcmci_push(host, sg_virt(sg), sg->length);
 
 627			if (stat)
 628				return stat;
 629			host->datasize += sg->length;
 630		}
 631		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
 632		if (stat)
 633			return stat;
 634	}
 635	return 0;
 
 
 
 636}
 637
 638static void mxcmci_datawork(struct work_struct *work)
 639{
 640	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
 641						  datawork);
 642	int datastat = mxcmci_transfer_data(host);
 643
 644	mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 645		MMC_REG_STATUS);
 646	mxcmci_finish_data(host, datastat);
 647
 648	if (host->req->stop) {
 649		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 650			mxcmci_finish_request(host, host->req);
 651			return;
 652		}
 653	} else {
 654		mxcmci_finish_request(host, host->req);
 655	}
 656}
 657
 658static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 659{
 660	struct mmc_request *req;
 661	int data_error;
 662	unsigned long flags;
 663
 664	spin_lock_irqsave(&host->lock, flags);
 665
 666	if (!host->data) {
 667		spin_unlock_irqrestore(&host->lock, flags);
 668		return;
 669	}
 670
 671	if (!host->req) {
 672		spin_unlock_irqrestore(&host->lock, flags);
 673		return;
 674	}
 675
 676	req = host->req;
 677	if (!req->stop)
 678		host->req = NULL; /* we will handle finish req below */
 679
 680	data_error = mxcmci_finish_data(host, stat);
 681
 682	spin_unlock_irqrestore(&host->lock, flags);
 683
 
 
 
 684	mxcmci_read_response(host, stat);
 685	host->cmd = NULL;
 686
 687	if (req->stop) {
 688		if (mxcmci_start_cmd(host, req->stop, 0)) {
 689			mxcmci_finish_request(host, req);
 690			return;
 691		}
 692	} else {
 693		mxcmci_finish_request(host, req);
 694	}
 695}
 696
 697static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 698{
 699	mxcmci_read_response(host, stat);
 700	host->cmd = NULL;
 701
 702	if (!host->data && host->req) {
 703		mxcmci_finish_request(host, host->req);
 704		return;
 705	}
 706
 707	/* For the DMA case the DMA engine handles the data transfer
 708	 * automatically. For non DMA we have to do it ourselves.
 709	 * Don't do it in interrupt context though.
 710	 */
 711	if (!mxcmci_use_dma(host) && host->data)
 712		schedule_work(&host->datawork);
 713
 714}
 715
 716static irqreturn_t mxcmci_irq(int irq, void *devid)
 717{
 718	struct mxcmci_host *host = devid;
 719	unsigned long flags;
 720	bool sdio_irq;
 721	u32 stat;
 722
 723	stat = mxcmci_readl(host, MMC_REG_STATUS);
 724	mxcmci_writel(host,
 725		stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
 726			 STATUS_WRITE_OP_DONE),
 727		MMC_REG_STATUS);
 728
 729	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 730
 731	spin_lock_irqsave(&host->lock, flags);
 732	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
 733	spin_unlock_irqrestore(&host->lock, flags);
 734
 735	if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
 736		mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);
 737
 738	if (sdio_irq) {
 739		mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
 740		mmc_signal_sdio_irq(host->mmc);
 741	}
 742
 743	if (stat & STATUS_END_CMD_RESP)
 744		mxcmci_cmd_done(host, stat);
 745
 746	if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
 747		del_timer(&host->watchdog);
 748		mxcmci_data_done(host, stat);
 749	}
 750
 751	if (host->default_irq_mask &&
 752		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
 753		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 754
 755	return IRQ_HANDLED;
 756}
 757
 758static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 759{
 760	struct mxcmci_host *host = mmc_priv(mmc);
 761	unsigned int cmdat = host->cmdat;
 762	int error;
 763
 764	WARN_ON(host->req != NULL);
 765
 766	host->req = req;
 767	host->cmdat &= ~CMD_DAT_CONT_INIT;
 768
 769	if (host->dma)
 770		host->do_dma = 1;
 771
 772	if (req->data) {
 773		error = mxcmci_setup_data(host, req->data);
 774		if (error) {
 775			req->cmd->error = error;
 776			goto out;
 777		}
  778
 780		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
 781
 782		if (req->data->flags & MMC_DATA_WRITE)
 783			cmdat |= CMD_DAT_CONT_WRITE;
 784	}
 785
 786	error = mxcmci_start_cmd(host, req->cmd, cmdat);
 787
 788out:
 789	if (error)
 790		mxcmci_finish_request(host, req);
 791}
 792
 793static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
 794{
 795	unsigned int divider;
 796	int prescaler = 0;
 797	unsigned int clk_in = clk_get_rate(host->clk_per);
 798
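	/*
	 * Find the smallest division of clk_per that brings the card clock
	 * at or below the requested rate.  The effective divisor is
	 * (divider + 1) while the prescaler is bypassed (prescaler == 0),
	 * and prescaler * 2 * (divider + 1) otherwise; the result is
	 * written to MMC_REG_CLK_RATE as (prescaler << 4) | divider.
	 * Illustrative example: with clk_in = 66 MHz and clk_ios = 25 MHz
	 * the loop settles on prescaler = 0, divider = 2, i.e. 66 / 3 = 22 MHz.
	 */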
 799	while (prescaler <= 0x800) {
 800		for (divider = 1; divider <= 0xF; divider++) {
 801			int x;
 802
 803			x = (clk_in / (divider + 1));
 804
 805			if (prescaler)
 806				x /= (prescaler * 2);
 807
 808			if (x <= clk_ios)
 809				break;
 810		}
 811		if (divider < 0x10)
 812			break;
 813
 814		if (prescaler == 0)
 815			prescaler = 1;
 816		else
 817			prescaler <<= 1;
 818	}
 819
 820	mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);
 821
 822	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
 823			prescaler, divider, clk_in, clk_ios);
 824}
 825
 826static int mxcmci_setup_dma(struct mmc_host *mmc)
 827{
 828	struct mxcmci_host *host = mmc_priv(mmc);
 829	struct dma_slave_config *config = &host->dma_slave_config;
 830
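	/*
	 * The same bidirectional slave channel serves both directions: the
	 * data FIFO sits behind MMC_REG_BUFFER_ACCESS and is accessed in
	 * 32-bit words, with the burst length following the bus width
	 * chosen in mxcmci_set_ios().
	 */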
 831	config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 832	config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 833	config->dst_addr_width = 4;
 834	config->src_addr_width = 4;
 835	config->dst_maxburst = host->burstlen;
 836	config->src_maxburst = host->burstlen;
 837	config->device_fc = false;
 838
 839	return dmaengine_slave_config(host->dma, config);
 840}
 841
 842static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 843{
 844	struct mxcmci_host *host = mmc_priv(mmc);
 845	int burstlen, ret;
 846
 847	/*
  848	 * use a burst length of 64 bytes (16 words) in 4-bit mode (--> reg value 0)
  849	 * use a burst length of 16 bytes (4 words) in 1-bit mode (--> reg value 16)
 850	 */
 851	if (ios->bus_width == MMC_BUS_WIDTH_4)
 852		burstlen = 16;
 853	else
 854		burstlen = 4;
 855
 856	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
 857		host->burstlen = burstlen;
 858		ret = mxcmci_setup_dma(mmc);
 859		if (ret) {
 860			dev_err(mmc_dev(host->mmc),
 861				"failed to config DMA channel. Falling back to PIO\n");
 862			dma_release_channel(host->dma);
 863			host->do_dma = 0;
 864			host->dma = NULL;
 865		}
 866	}
 867
 868	if (ios->bus_width == MMC_BUS_WIDTH_4)
 869		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
 870	else
 871		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 872
 873	if (host->power_mode != ios->power_mode) {
 874		host->power_mode = ios->power_mode;
 875		mxcmci_set_power(host, ios->vdd);
 876
 877		if (ios->power_mode == MMC_POWER_ON)
 878			host->cmdat |= CMD_DAT_CONT_INIT;
 879	}
 880
 881	if (ios->clock) {
 882		mxcmci_set_clk_rate(host, ios->clock);
 883		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 884	} else {
 885		mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
 886	}
 887
 888	host->clock = ios->clock;
 889}
 890
 891static irqreturn_t mxcmci_detect_irq(int irq, void *data)
 892{
 893	struct mmc_host *mmc = data;
 894
 895	dev_dbg(mmc_dev(mmc), "%s\n", __func__);
 896
 897	mmc_detect_change(mmc, msecs_to_jiffies(250));
 898	return IRQ_HANDLED;
 899}
 900
 901static int mxcmci_get_ro(struct mmc_host *mmc)
 902{
 903	struct mxcmci_host *host = mmc_priv(mmc);
 904
 905	if (host->pdata && host->pdata->get_ro)
 906		return !!host->pdata->get_ro(mmc_dev(mmc));
 907	/*
  908	 * If the board doesn't support read-only detection (no mmc_gpio
  909	 * context or the gpio is invalid), then let the mmc core decide
  910	 * what to do.
 911	 */
 912	return mmc_gpio_get_ro(mmc);
 913}
 914
 915static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 916{
 917	struct mxcmci_host *host = mmc_priv(mmc);
 918	unsigned long flags;
 919	u32 int_cntr;
 920
 921	spin_lock_irqsave(&host->lock, flags);
 922	host->use_sdio = enable;
 923	int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);
 924
 925	if (enable)
 926		int_cntr |= INT_SDIO_IRQ_EN;
 927	else
 928		int_cntr &= ~INT_SDIO_IRQ_EN;
 929
 930	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 931	spin_unlock_irqrestore(&host->lock, flags);
 932}
 933
 934static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
 935{
 936	struct mxcmci_host *mxcmci = mmc_priv(host);
 937
 938	/*
 939	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
  940	 * multi-block transfers when the connected SDIO peripheral doesn't
 941	 * drive the BUSY line as required by the specs.
 942	 * One way to prevent this is to only allow 1-bit transfers.
 943	 */
 944
 945	if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
 946		host->caps &= ~MMC_CAP_4_BIT_DATA;
 947	else
 948		host->caps |= MMC_CAP_4_BIT_DATA;
 949}
 950
 951static bool filter(struct dma_chan *chan, void *param)
 952{
 953	struct mxcmci_host *host = param;
 954
 955	if (!imx_dma_is_general_purpose(chan))
 956		return false;
 957
 958	chan->private = &host->dma_data;
 959
 960	return true;
 961}
 962
 963static void mxcmci_watchdog(unsigned long data)
 964{
 965	struct mmc_host *mmc = (struct mmc_host *)data;
 966	struct mxcmci_host *host = mmc_priv(mmc);
 967	struct mmc_request *req = host->req;
 968	unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
 969
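	/*
	 * A DMA transfer did not complete in time (MXCMCI_TIMEOUT_MS; the
	 * timer is assumed to be armed when the transfer is started).  For
	 * reads the pending DMA transaction is cancelled, for writes the
	 * controller is soft-reset instead.
	 */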
 970	if (host->dma_dir == DMA_FROM_DEVICE) {
 971		dmaengine_terminate_all(host->dma);
 972		dev_err(mmc_dev(host->mmc),
 973			"%s: read time out (status = 0x%08x)\n",
 974			__func__, stat);
 975	} else {
 976		dev_err(mmc_dev(host->mmc),
 977			"%s: write time out (status = 0x%08x)\n",
 978			__func__, stat);
 979		mxcmci_softreset(host);
 980	}
 981
  982	/* Mark transfer as erroneous and inform the upper layers */
 983
 984	if (host->data)
 985		host->data->error = -ETIMEDOUT;
 986	host->req = NULL;
 987	host->cmd = NULL;
 988	host->data = NULL;
 989	mmc_request_done(host->mmc, req);
 990}
 991
 992static const struct mmc_host_ops mxcmci_ops = {
 993	.request		= mxcmci_request,
 994	.set_ios		= mxcmci_set_ios,
 995	.get_ro			= mxcmci_get_ro,
 996	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
 997	.init_card		= mxcmci_init_card,
 998};
 999
1000static int mxcmci_probe(struct platform_device *pdev)
1001{
1002	struct mmc_host *mmc;
1003	struct mxcmci_host *host;
1004	struct resource *res;
1005	int ret = 0, irq;
1006	bool dat3_card_detect = false;
1007	dma_cap_mask_t mask;
1008	const struct of_device_id *of_id;
1009	struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
1010
1011	pr_info("i.MX/MPC512x SDHC driver\n");
1012
1013	of_id = of_match_device(mxcmci_of_match, &pdev->dev);
1014
1015	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1016	irq = platform_get_irq(pdev, 0);
1017	if (irq < 0)
1018		return -EINVAL;
1019
1020	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1021	if (!mmc)
1022		return -ENOMEM;
1023
1024	host = mmc_priv(mmc);
1025
1026	host->base = devm_ioremap_resource(&pdev->dev, res);
1027	if (IS_ERR(host->base)) {
1028		ret = PTR_ERR(host->base);
1029		goto out_free;
1030	}
1031
1032	host->phys_base = res->start;
1033
1034	ret = mmc_of_parse(mmc);
1035	if (ret)
1036		goto out_free;
1037	mmc->ops = &mxcmci_ops;
1038
 1039	/* With devicetree the bus width has already been set by mmc_of_parse() */
1040	if (pdata)
1041		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1042	else
1043		mmc->caps |= MMC_CAP_SDIO_IRQ;
1044
1045	/* MMC core transfer sizes tunable parameters */
1046	mmc->max_blk_size = 2048;
1047	mmc->max_blk_count = 65535;
1048	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1049	mmc->max_seg_size = mmc->max_req_size;
1050
1051	if (of_id) {
1052		const struct platform_device_id *id_entry = of_id->data;
1053		host->devtype = id_entry->driver_data;
1054	} else {
1055		host->devtype = pdev->id_entry->driver_data;
1056	}
1057
1058	/* adjust max_segs after devtype detection */
1059	if (!is_mpc512x_mmc(host))
1060		mmc->max_segs = 64;
1061
1062	host->mmc = mmc;
1063	host->pdata = pdata;
1064	spin_lock_init(&host->lock);
1065
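	/*
	 * Use DAT3-based card detection (the controller's own card
	 * insertion/removal interrupts) either when the board requests it
	 * via platform data or, in the devicetree case, when the card is
	 * removable and no cd-gpios property is given.
	 */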
1066	if (pdata)
1067		dat3_card_detect = pdata->dat3_card_detect;
1068	else if (!(mmc->caps & MMC_CAP_NONREMOVABLE)
1069			&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
1070		dat3_card_detect = true;
1071
1072	ret = mmc_regulator_get_supply(mmc);
1073	if (ret == -EPROBE_DEFER)
1074		goto out_free;
1075
1076	if (!mmc->ocr_avail) {
1077		if (pdata && pdata->ocr_avail)
1078			mmc->ocr_avail = pdata->ocr_avail;
1079		else
1080			mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1081	}
1082
1083	if (dat3_card_detect)
1084		host->default_irq_mask =
1085			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
1086	else
1087		host->default_irq_mask = 0;
1088
1089	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1090	if (IS_ERR(host->clk_ipg)) {
1091		ret = PTR_ERR(host->clk_ipg);
1092		goto out_free;
1093	}
1094
1095	host->clk_per = devm_clk_get(&pdev->dev, "per");
1096	if (IS_ERR(host->clk_per)) {
1097		ret = PTR_ERR(host->clk_per);
1098		goto out_free;
1099	}
1100
1101	clk_prepare_enable(host->clk_per);
 1102	clk_prepare_enable(host->clk_ipg);
1103
1104	mxcmci_softreset(host);
1105
1106	host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
1107	if (host->rev_no != 0x400) {
1108		ret = -ENODEV;
1109		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1110			host->rev_no);
1111		goto out_clk_put;
1112	}
1113
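	/*
	 * Clock limits follow from mxcmci_set_clk_rate(): the largest
	 * possible division is 0x800 * 2 * 16 = 65536, hence
	 * f_min = clk_per / 2^16, and the smallest usable division is 2
	 * (prescaler bypassed, divider + 1 == 2), hence f_max = clk_per / 2.
	 */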
1114	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
1115	mmc->f_max = clk_get_rate(host->clk_per) >> 1;
1116
 1117	/* read time-out value recommended in the data sheet */
1118	mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);
1119
1120	mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
1121
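	/*
	 * With devicetree the DMA channel is looked up via the "rx-tx"
	 * entry in dma-names.  Boards using platform data instead pass the
	 * DMA request line as an IORESOURCE_DMA resource, and the channel
	 * is selected through the filter() callback above.
	 */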
1122	if (!host->pdata) {
 1123		host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
1124	} else {
1125		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1126		if (res) {
1127			host->dmareq = res->start;
1128			host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
1129			host->dma_data.priority = DMA_PRIO_LOW;
1130			host->dma_data.dma_request = host->dmareq;
1131			dma_cap_zero(mask);
1132			dma_cap_set(DMA_SLAVE, mask);
1133			host->dma = dma_request_channel(mask, filter, host);
1134		}
1135	}
1136	if (host->dma)
1137		mmc->max_seg_size = dma_get_max_seg_size(
1138				host->dma->device->dev);
1139	else
1140		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
1141
1142	INIT_WORK(&host->datawork, mxcmci_datawork);
1143
1144	ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0,
1145			       dev_name(&pdev->dev), host);
1146	if (ret)
1147		goto out_free_dma;
1148
1149	platform_set_drvdata(pdev, mmc);
1150
1151	if (host->pdata && host->pdata->init) {
1152		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
1153				host->mmc);
1154		if (ret)
1155			goto out_free_dma;
1156	}
1157
1158	init_timer(&host->watchdog);
1159	host->watchdog.function = &mxcmci_watchdog;
1160	host->watchdog.data = (unsigned long)mmc;
1161
 1162	mmc_add_host(mmc);
1163
1164	return 0;
1165
1166out_free_dma:
1167	if (host->dma)
1168		dma_release_channel(host->dma);
1169
 1170out_clk_put:
1171	clk_disable_unprepare(host->clk_per);
1172	clk_disable_unprepare(host->clk_ipg);
1173
1174out_free:
1175	mmc_free_host(mmc);
1176
1177	return ret;
1178}
1179
1180static int mxcmci_remove(struct platform_device *pdev)
1181{
1182	struct mmc_host *mmc = platform_get_drvdata(pdev);
1183	struct mxcmci_host *host = mmc_priv(mmc);
1184
1185	mmc_remove_host(mmc);
1186
1187	if (host->pdata && host->pdata->exit)
1188		host->pdata->exit(&pdev->dev, mmc);
1189
1190	if (host->dma)
1191		dma_release_channel(host->dma);
1192
1193	clk_disable_unprepare(host->clk_per);
1194	clk_disable_unprepare(host->clk_ipg);
1195
1196	mmc_free_host(mmc);
1197
1198	return 0;
1199}
1200
1201static int __maybe_unused mxcmci_suspend(struct device *dev)
1202{
1203	struct mmc_host *mmc = dev_get_drvdata(dev);
1204	struct mxcmci_host *host = mmc_priv(mmc);
1205
1206	clk_disable_unprepare(host->clk_per);
1207	clk_disable_unprepare(host->clk_ipg);
1208	return 0;
1209}
1210
1211static int __maybe_unused mxcmci_resume(struct device *dev)
1212{
1213	struct mmc_host *mmc = dev_get_drvdata(dev);
 1214	struct mxcmci_host *host = mmc_priv(mmc);
1215
1216	clk_prepare_enable(host->clk_per);
1217	clk_prepare_enable(host->clk_ipg);
 1218	return 0;
1219}
1220
1221static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
1222
1223static struct platform_driver mxcmci_driver = {
1224	.probe		= mxcmci_probe,
1225	.remove		= mxcmci_remove,
1226	.id_table	= mxcmci_devtype,
1227	.driver		= {
1228		.name		= DRIVER_NAME,
 1229		.pm	= &mxcmci_pm_ops,
1230		.of_match_table	= mxcmci_of_match,
1231	}
1232};
1233
1234module_platform_driver(mxcmci_driver);
1235
1236MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1237MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1238MODULE_LICENSE("GPL");
1239MODULE_ALIAS("platform:mxc-mmc");